tailq: remove unneeded inclusions
[dpdk.git] / lib / librte_ether / rte_ethdev.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/types.h>
35 #include <sys/queue.h>
36 #include <ctype.h>
37 #include <stdio.h>
38 #include <stdlib.h>
39 #include <string.h>
40 #include <stdarg.h>
41 #include <errno.h>
42 #include <stdint.h>
43 #include <inttypes.h>
44 #include <netinet/in.h>
45
46 #include <rte_byteorder.h>
47 #include <rte_log.h>
48 #include <rte_debug.h>
49 #include <rte_interrupts.h>
50 #include <rte_pci.h>
51 #include <rte_memory.h>
52 #include <rte_memcpy.h>
53 #include <rte_memzone.h>
54 #include <rte_launch.h>
55 #include <rte_eal.h>
56 #include <rte_per_lcore.h>
57 #include <rte_lcore.h>
58 #include <rte_atomic.h>
59 #include <rte_branch_prediction.h>
60 #include <rte_common.h>
61 #include <rte_ring.h>
62 #include <rte_mempool.h>
63 #include <rte_malloc.h>
64 #include <rte_mbuf.h>
65 #include <rte_errno.h>
66 #include <rte_spinlock.h>
67 #include <rte_string_fns.h>
68
69 #include "rte_ether.h"
70 #include "rte_ethdev.h"
71
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
/* Debug trace prefixed with the calling function's name; compiled to
 * nothing unless RTE_LIBRTE_ETHDEV_DEBUG is defined. */
#define PMD_DEBUG_TRACE(fmt, args...) do {                        \
		RTE_LOG(ERR, PMD, "%s: " fmt, __func__, ## args); \
	} while (0)
#else
#define PMD_DEBUG_TRACE(fmt, args...)
#endif

/* Macros for checking for restricting functions to primary instance only */
#define PROC_PRIMARY_OR_ERR_RET(retval) do { \
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
		PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
		return (retval); \
	} \
} while(0)
/* Same primary-process check, for functions returning void */
#define PROC_PRIMARY_OR_RET() do { \
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
		PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
		return; \
	} \
} while(0)

/* Macros to check for invalid function pointers in dev_ops structure */
#define FUNC_PTR_OR_ERR_RET(func, retval) do { \
	if ((func) == NULL) { \
		PMD_DEBUG_TRACE("Function not supported\n"); \
		return (retval); \
	} \
} while(0)
/* Same function-pointer check, for functions returning void */
#define FUNC_PTR_OR_RET(func) do { \
	if ((func) == NULL) { \
		PMD_DEBUG_TRACE("Function not supported\n"); \
		return; \
	} \
} while(0)
107
/* Name of the memzone holding the shared per-port data array */
static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
/* Per-process table of ethernet devices, indexed by port id */
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
/* Shared (memzone-backed) per-port data; mapped lazily by
 * rte_eth_dev_data_alloc() on first allocation */
static struct rte_eth_dev_data *rte_eth_dev_data = NULL;
/* Number of ports currently attached in this process */
static uint8_t nb_ports = 0;

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and its offset in stats structure  */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};
121
/* Device-level xstats: generic name -> offset inside struct rte_eth_stats */
static struct rte_eth_xstats_name_off rte_stats_strings[] = {
	 {"rx_packets", offsetof(struct rte_eth_stats, ipackets)},
	 {"tx_packets", offsetof(struct rte_eth_stats, opackets)},
	 {"rx_bytes", offsetof(struct rte_eth_stats, ibytes)},
	 {"tx_bytes", offsetof(struct rte_eth_stats, obytes)},
	 {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	 {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
	 {"rx_crc_errors", offsetof(struct rte_eth_stats, ibadcrc)},
	 {"rx_bad_length_errors", offsetof(struct rte_eth_stats, ibadlen)},
	 {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	 {"alloc_rx_buff_failed", offsetof(struct rte_eth_stats, rx_nombuf)},
	 {"fdir_match", offsetof(struct rte_eth_stats, fdirmatch)},
	 {"fdir_miss", offsetof(struct rte_eth_stats, fdirmiss)},
	 {"tx_flow_control_xon", offsetof(struct rte_eth_stats, tx_pause_xon)},
	 {"rx_flow_control_xon", offsetof(struct rte_eth_stats, rx_pause_xon)},
	 {"tx_flow_control_xoff", offsetof(struct rte_eth_stats, tx_pause_xoff)},
	 {"rx_flow_control_xoff", offsetof(struct rte_eth_stats, rx_pause_xoff)},
};
/* Number of device-level xstats entries */
#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))

/* Per-RX-queue xstats: offsets of the per-queue counter arrays */
static struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
	{"rx_packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"rx_bytes", offsetof(struct rte_eth_stats, q_ibytes)},
};
#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /	\
		sizeof(rte_rxq_stats_strings[0]))

/* Per-TX-queue xstats: offsets of the per-queue counter arrays */
static struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
	{"tx_packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"tx_bytes", offsetof(struct rte_eth_stats, q_obytes)},
	{"tx_errors", offsetof(struct rte_eth_stats, q_errors)},
};
#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /	\
		sizeof(rte_txq_stats_strings[0]))
156
157
/**
 * The user application callback description.
 *
 * It contains callback address to be registered by user application,
 * the pointer to the parameters for callback, and the event type.
 */
struct rte_eth_dev_callback {
	TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
	rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
	void *cb_arg;                           /**< Parameter for callback */
	enum rte_eth_event_type event;          /**< Interrupt event type */
	uint32_t active;                        /**< Callback is executing */
};

/* Direction selectors for the queue-statistics mapping */
enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};

/* Values for rte_eth_dev.attached; DEV_DETACHED must be 0 so that a
 * zero-initialized device table reads as "all slots free". */
enum {
	DEV_DETACHED = 0,
	DEV_ATTACHED
};
181
182 static inline void
183 rte_eth_dev_data_alloc(void)
184 {
185         const unsigned flags = 0;
186         const struct rte_memzone *mz;
187
188         if (rte_eal_process_type() == RTE_PROC_PRIMARY){
189                 mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
190                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
191                                 rte_socket_id(), flags);
192         } else
193                 mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
194         if (mz == NULL)
195                 rte_panic("Cannot allocate memzone for ethernet port data\n");
196
197         rte_eth_dev_data = mz->addr;
198         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
199                 memset(rte_eth_dev_data, 0,
200                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
201 }
202
203 struct rte_eth_dev *
204 rte_eth_dev_allocated(const char *name)
205 {
206         unsigned i;
207
208         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
209                 if ((rte_eth_devices[i].attached == DEV_ATTACHED) &&
210                     strcmp(rte_eth_devices[i].data->name, name) == 0)
211                         return &rte_eth_devices[i];
212         }
213         return NULL;
214 }
215
216 static uint8_t
217 rte_eth_dev_find_free_port(void)
218 {
219         unsigned i;
220
221         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
222                 if (rte_eth_devices[i].attached == DEV_DETACHED)
223                         return i;
224         }
225         return RTE_MAX_ETHPORTS;
226 }
227
228 struct rte_eth_dev *
229 rte_eth_dev_allocate(const char *name, enum rte_eth_dev_type type)
230 {
231         uint8_t port_id;
232         struct rte_eth_dev *eth_dev;
233
234         port_id = rte_eth_dev_find_free_port();
235         if (port_id == RTE_MAX_ETHPORTS) {
236                 PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
237                 return NULL;
238         }
239
240         if (rte_eth_dev_data == NULL)
241                 rte_eth_dev_data_alloc();
242
243         if (rte_eth_dev_allocated(name) != NULL) {
244                 PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n", name);
245                 return NULL;
246         }
247
248         eth_dev = &rte_eth_devices[port_id];
249         eth_dev->data = &rte_eth_dev_data[port_id];
250         snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
251         eth_dev->data->port_id = port_id;
252         eth_dev->attached = DEV_ATTACHED;
253         eth_dev->dev_type = type;
254         nb_ports++;
255         return eth_dev;
256 }
257
258 static inline int
259 rte_eth_dev_create_unique_device_name(char *name, size_t size,
260                 struct rte_pci_device *pci_dev)
261 {
262         int ret;
263
264         if ((name == NULL) || (pci_dev == NULL))
265                 return -EINVAL;
266
267         ret = snprintf(name, size, "%d:%d.%d",
268                         pci_dev->addr.bus, pci_dev->addr.devid,
269                         pci_dev->addr.function);
270         if (ret < 0)
271                 return ret;
272         return 0;
273 }
274
275 int
276 rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
277 {
278         if (eth_dev == NULL)
279                 return -EINVAL;
280
281         eth_dev->attached = 0;
282         nb_ports--;
283         return 0;
284 }
285
/*
 * Generic PCI devinit hook installed by rte_eth_driver_register().
 * Allocates a port slot for the probed PCI device, allocates the
 * driver-private data (primary process only), then calls the PMD's
 * own eth_dev_init callback.  On PMD failure the slot and private
 * data are rolled back.
 *
 * Returns 0 on success, -ENOMEM when no port slot is available, or
 * the PMD's error code.
 */
static int
rte_eth_dev_init(struct rte_pci_driver *pci_drv,
		 struct rte_pci_device *pci_dev)
{
	struct eth_driver    *eth_drv;
	struct rte_eth_dev *eth_dev;
	char ethdev_name[RTE_ETH_NAME_MAX_LEN];

	int diag;

	/* the PCI driver is the first member of eth_driver, so the cast
	 * recovers the enclosing structure */
	eth_drv = (struct eth_driver *)pci_drv;

	/* Create unique Ethernet device name using PCI address */
	/* NOTE(review): the return value is not checked here; a snprintf
	 * failure would leave ethdev_name unusable — TODO confirm upstream */
	rte_eth_dev_create_unique_device_name(ethdev_name,
			sizeof(ethdev_name), pci_dev);

	eth_dev = rte_eth_dev_allocate(ethdev_name, RTE_ETH_DEV_PCI);
	if (eth_dev == NULL)
		return -ENOMEM;

	/* only the primary process owns the driver-private allocation;
	 * secondaries reuse the pointer already in the shared data */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY){
		eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
				  eth_drv->dev_private_size,
				  RTE_CACHE_LINE_SIZE);
		if (eth_dev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private port data\n");
	}
	eth_dev->pci_dev = pci_dev;
	eth_dev->driver = eth_drv;
	eth_dev->data->rx_mbuf_alloc_failed = 0;

	/* init user callbacks */
	TAILQ_INIT(&(eth_dev->link_intr_cbs));

	/*
	 * Set the default MTU.
	 */
	eth_dev->data->mtu = ETHER_MTU;

	/* Invoke PMD device initialization function */
	diag = (*eth_drv->eth_dev_init)(eth_dev);
	if (diag == 0)
		return (0);

	/* PMD init failed: log, free private data and release the slot
	 * (mirrors rte_eth_dev_release_port()) */
	PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%u device_id=0x%x)"
			" failed\n", pci_drv->name,
			(unsigned) pci_dev->id.vendor_id,
			(unsigned) pci_dev->id.device_id);
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);
	eth_dev->attached = DEV_DETACHED;
	nb_ports--;
	return diag;
}
340
341 static int
342 rte_eth_dev_uninit(struct rte_pci_device *pci_dev)
343 {
344         const struct eth_driver *eth_drv;
345         struct rte_eth_dev *eth_dev;
346         char ethdev_name[RTE_ETH_NAME_MAX_LEN];
347         int ret;
348
349         if (pci_dev == NULL)
350                 return -EINVAL;
351
352         /* Create unique Ethernet device name using PCI address */
353         rte_eth_dev_create_unique_device_name(ethdev_name,
354                         sizeof(ethdev_name), pci_dev);
355
356         eth_dev = rte_eth_dev_allocated(ethdev_name);
357         if (eth_dev == NULL)
358                 return -ENODEV;
359
360         eth_drv = (const struct eth_driver *)pci_dev->driver;
361
362         /* Invoke PMD device uninit function */
363         if (*eth_drv->eth_dev_uninit) {
364                 ret = (*eth_drv->eth_dev_uninit)(eth_dev);
365                 if (ret)
366                         return ret;
367         }
368
369         /* free ether device */
370         rte_eth_dev_release_port(eth_dev);
371
372         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
373                 rte_free(eth_dev->data->dev_private);
374
375         eth_dev->pci_dev = NULL;
376         eth_dev->driver = NULL;
377         eth_dev->data = NULL;
378
379         return 0;
380 }
381
382 /**
383  * Register an Ethernet [Poll Mode] driver.
384  *
385  * Function invoked by the initialization function of an Ethernet driver
386  * to simultaneously register itself as a PCI driver and as an Ethernet
387  * Poll Mode Driver.
388  * Invokes the rte_eal_pci_register() function to register the *pci_drv*
389  * structure embedded in the *eth_drv* structure, after having stored the
390  * address of the rte_eth_dev_init() function in the *devinit* field of
391  * the *pci_drv* structure.
392  * During the PCI probing phase, the rte_eth_dev_init() function is
393  * invoked for each PCI [Ethernet device] matching the embedded PCI
394  * identifiers provided by the driver.
395  */
396 void
397 rte_eth_driver_register(struct eth_driver *eth_drv)
398 {
399         eth_drv->pci_drv.devinit = rte_eth_dev_init;
400         eth_drv->pci_drv.devuninit = rte_eth_dev_uninit;
401         rte_eal_pci_register(&eth_drv->pci_drv);
402 }
403
404 static int
405 rte_eth_dev_is_valid_port(uint8_t port_id)
406 {
407         if (port_id >= RTE_MAX_ETHPORTS ||
408             rte_eth_devices[port_id].attached != DEV_ATTACHED)
409                 return 0;
410         else
411                 return 1;
412 }
413
414 int
415 rte_eth_dev_socket_id(uint8_t port_id)
416 {
417         if (!rte_eth_dev_is_valid_port(port_id))
418                 return -1;
419         return rte_eth_devices[port_id].pci_dev->numa_node;
420 }
421
422 uint8_t
423 rte_eth_dev_count(void)
424 {
425         return (nb_ports);
426 }
427
428 /* So far, DPDK hotplug function only supports linux */
429 #ifdef RTE_LIBRTE_EAL_HOTPLUG
430
431 static enum rte_eth_dev_type
432 rte_eth_dev_get_device_type(uint8_t port_id)
433 {
434         if (!rte_eth_dev_is_valid_port(port_id))
435                 return RTE_ETH_DEV_UNKNOWN;
436         return rte_eth_devices[port_id].dev_type;
437 }
438
439 static int
440 rte_eth_dev_save(struct rte_eth_dev *devs, size_t size)
441 {
442         if ((devs == NULL) ||
443             (size != sizeof(struct rte_eth_dev) * RTE_MAX_ETHPORTS))
444                 return -EINVAL;
445
446         /* save current rte_eth_devices */
447         memcpy(devs, rte_eth_devices, size);
448         return 0;
449 }
450
451 static int
452 rte_eth_dev_get_changed_port(struct rte_eth_dev *devs, uint8_t *port_id)
453 {
454         if ((devs == NULL) || (port_id == NULL))
455                 return -EINVAL;
456
457         /* check which port was attached or detached */
458         for (*port_id = 0; *port_id < RTE_MAX_ETHPORTS; (*port_id)++, devs++) {
459                 if (rte_eth_devices[*port_id].attached ^ devs->attached)
460                         return 0;
461         }
462         return -ENODEV;
463 }
464
465 static int
466 rte_eth_dev_get_addr_by_port(uint8_t port_id, struct rte_pci_addr *addr)
467 {
468         if (!rte_eth_dev_is_valid_port(port_id)) {
469                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
470                 return -EINVAL;
471         }
472
473         if (addr == NULL) {
474                 PMD_DEBUG_TRACE("Null pointer is specified\n");
475                 return -EINVAL;
476         }
477
478         *addr = rte_eth_devices[port_id].pci_dev->addr;
479         return 0;
480 }
481
482 static int
483 rte_eth_dev_get_name_by_port(uint8_t port_id, char *name)
484 {
485         char *tmp;
486
487         if (!rte_eth_dev_is_valid_port(port_id)) {
488                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
489                 return -EINVAL;
490         }
491
492         if (name == NULL) {
493                 PMD_DEBUG_TRACE("Null pointer is specified\n");
494                 return -EINVAL;
495         }
496
497         /* shouldn't check 'rte_eth_devices[i].data',
498          * because it might be overwritten by VDEV PMD */
499         tmp = rte_eth_dev_data[port_id].name;
500         strcpy(name, tmp);
501         return 0;
502 }
503
504 static int
505 rte_eth_dev_is_detachable(uint8_t port_id)
506 {
507         uint32_t drv_flags;
508
509         if (port_id >= RTE_MAX_ETHPORTS) {
510                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
511                 return -EINVAL;
512         }
513
514         if (rte_eth_devices[port_id].dev_type == RTE_ETH_DEV_PCI) {
515                 switch (rte_eth_devices[port_id].pci_dev->pt_driver) {
516                 case RTE_PT_IGB_UIO:
517                 case RTE_PT_UIO_GENERIC:
518                         break;
519                 case RTE_PT_VFIO:
520                 default:
521                         return -ENOTSUP;
522                 }
523         }
524
525         drv_flags = rte_eth_devices[port_id].driver->pci_drv.drv_flags;
526         return !(drv_flags & RTE_PCI_DRV_DETACHABLE);
527 }
528
/* attach the new physical device, then store port_id of the device */
static int
rte_eth_dev_attach_pdev(struct rte_pci_addr *addr, uint8_t *port_id)
{
	uint8_t new_port_id;
	struct rte_eth_dev devs[RTE_MAX_ETHPORTS];

	if ((addr == NULL) || (port_id == NULL))
		goto err;

	/* save current port status, so the newly attached port can be
	 * found afterwards by diffing against this snapshot */
	if (rte_eth_dev_save(devs, sizeof(devs)))
		goto err;
	/* re-construct pci_device_list */
	if (rte_eal_pci_scan())
		goto err;
	/* invoke the probe func of the driver that can handle the new device.
	 * TODO:
	 * rte_eal_pci_probe_one() should return port_id.
	 * And rte_eth_dev_save() and rte_eth_dev_get_changed_port()
	 * should be removed. */
	if (rte_eal_pci_probe_one(addr))
		goto err;
	/* get port_id enabled by above procedures */
	if (rte_eth_dev_get_changed_port(devs, &new_port_id))
		goto err;

	*port_id = new_port_id;
	return 0;
err:
	RTE_LOG(ERR, EAL, "Driver, cannot attach the device\n");
	return -1;
}
562
/* detach the new physical device, then store pci_addr of the device */
static int
rte_eth_dev_detach_pdev(uint8_t port_id, struct rte_pci_addr *addr)
{
	struct rte_pci_addr freed_addr;
	struct rte_pci_addr vp;

	if (addr == NULL)
		goto err;

	/* check whether the driver supports detach feature, or not */
	if (rte_eth_dev_is_detachable(port_id))
		goto err;

	/* get pci address by port id */
	if (rte_eth_dev_get_addr_by_port(port_id, &freed_addr))
		goto err;

	/* Zeroed pci addr means the port comes from a virtual device,
	 * which must not be detached through the PCI path */
	vp.domain = vp.bus = vp.devid = vp.function = 0;
	if (rte_eal_compare_pci_addr(&vp, &freed_addr) == 0)
		goto err;

	/* invoke close func of the driver,
	 * also remove the device from pci_device_list */
	if (rte_eal_pci_close_one(&freed_addr))
		goto err;

	*addr = freed_addr;
	return 0;
err:
	RTE_LOG(ERR, EAL, "Driver, cannot detach the device\n");
	return -1;
}
597
598 /* attach the new virtual device, then store port_id of the device */
599 static int
600 rte_eth_dev_attach_vdev(const char *vdevargs, uint8_t *port_id)
601 {
602         char *name = NULL, *args = NULL;
603         uint8_t new_port_id;
604         struct rte_eth_dev devs[RTE_MAX_ETHPORTS];
605         int ret = -1;
606
607         if ((vdevargs == NULL) || (port_id == NULL))
608                 goto end;
609
610         /* parse vdevargs, then retrieve device name and args */
611         if (rte_eal_parse_devargs_str(vdevargs, &name, &args))
612                 goto end;
613
614         /* save current port status */
615         if (rte_eth_dev_save(devs, sizeof(devs)))
616                 goto end;
617         /* walk around dev_driver_list to find the driver of the device,
618          * then invoke probe function o the driver.
619          * TODO:
620          * rte_eal_vdev_init() should return port_id,
621          * And rte_eth_dev_save() and rte_eth_dev_get_changed_port()
622          * should be removed. */
623         if (rte_eal_vdev_init(name, args))
624                 goto end;
625         /* get port_id enabled by above procedures */
626         if (rte_eth_dev_get_changed_port(devs, &new_port_id))
627                 goto end;
628         ret = 0;
629         *port_id = new_port_id;
630 end:
631         if (name)
632                 free(name);
633         if (args)
634                 free(args);
635
636         if (ret < 0)
637                 RTE_LOG(ERR, EAL, "Driver, cannot attach the device\n");
638         return ret;
639 }
640
641 /* detach the new virtual device, then store the name of the device */
642 static int
643 rte_eth_dev_detach_vdev(uint8_t port_id, char *vdevname)
644 {
645         char name[RTE_ETH_NAME_MAX_LEN];
646
647         if (vdevname == NULL)
648                 goto err;
649
650         /* check whether the driver supports detach feature, or not */
651         if (rte_eth_dev_is_detachable(port_id))
652                 goto err;
653
654         /* get device name by port id */
655         if (rte_eth_dev_get_name_by_port(port_id, name))
656                 goto err;
657         /* walk around dev_driver_list to find the driver of the device,
658          * then invoke close function o the driver */
659         if (rte_eal_vdev_uninit(name))
660                 goto err;
661
662         strncpy(vdevname, name, sizeof(name));
663         return 0;
664 err:
665         RTE_LOG(ERR, EAL, "Driver, cannot detach the device\n");
666         return -1;
667 }
668
669 /* attach the new device, then store port_id of the device */
670 int
671 rte_eth_dev_attach(const char *devargs, uint8_t *port_id)
672 {
673         struct rte_pci_addr addr;
674
675         if ((devargs == NULL) || (port_id == NULL))
676                 return -EINVAL;
677
678         if (eal_parse_pci_DomBDF(devargs, &addr) == 0)
679                 return rte_eth_dev_attach_pdev(&addr, port_id);
680         else
681                 return rte_eth_dev_attach_vdev(devargs, port_id);
682 }
683
684 /* detach the device, then store the name of the device */
685 int
686 rte_eth_dev_detach(uint8_t port_id, char *name)
687 {
688         struct rte_pci_addr addr;
689         int ret;
690
691         if (name == NULL)
692                 return -EINVAL;
693
694         if (rte_eth_dev_get_device_type(port_id) == RTE_ETH_DEV_PCI) {
695                 ret = rte_eth_dev_get_addr_by_port(port_id, &addr);
696                 if (ret < 0)
697                         return ret;
698
699                 ret = rte_eth_dev_detach_pdev(port_id, &addr);
700                 if (ret == 0)
701                         snprintf(name, RTE_ETH_NAME_MAX_LEN,
702                                 "%04x:%02x:%02x.%d",
703                                 addr.domain, addr.bus,
704                                 addr.devid, addr.function);
705
706                 return ret;
707         } else
708                 return rte_eth_dev_detach_vdev(port_id, name);
709 }
710 #else /* RTE_LIBRTE_EAL_HOTPLUG */
/* Hotplug is not compiled in: fail attach requests unconditionally */
int
rte_eth_dev_attach(const char *devargs __rte_unused,
			uint8_t *port_id __rte_unused)
{
	RTE_LOG(ERR, EAL, "Hotplug support isn't enabled\n");
	return -1;
}
718
/* detach the device, then store the name of the device */
/* Hotplug is not compiled in: fail detach requests unconditionally */
int
rte_eth_dev_detach(uint8_t port_id __rte_unused,
			char *name __rte_unused)
{
	RTE_LOG(ERR, EAL, "Hotplug support isn't enabled\n");
	return -1;
}
727 #endif /* RTE_LIBRTE_EAL_HOTPLUG */
728
729 static int
730 rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
731 {
732         uint16_t old_nb_queues = dev->data->nb_rx_queues;
733         void **rxq;
734         unsigned i;
735
736         if (dev->data->rx_queues == NULL) { /* first time configuration */
737                 dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
738                                 sizeof(dev->data->rx_queues[0]) * nb_queues,
739                                 RTE_CACHE_LINE_SIZE);
740                 if (dev->data->rx_queues == NULL) {
741                         dev->data->nb_rx_queues = 0;
742                         return -(ENOMEM);
743                 }
744 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
745                 dev->post_rx_burst_cbs = rte_zmalloc(
746                         "ethdev->post_rx_burst_cbs",
747                         sizeof(*dev->post_rx_burst_cbs) * nb_queues,
748                         RTE_CACHE_LINE_SIZE);
749                 if (dev->post_rx_burst_cbs == NULL) {
750                         rte_free(dev->data->rx_queues);
751                         dev->data->rx_queues = NULL;
752                         dev->data->nb_rx_queues = 0;
753                         return -ENOMEM;
754                 }
755 #endif
756
757         } else { /* re-configure */
758                 FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
759
760                 rxq = dev->data->rx_queues;
761
762                 for (i = nb_queues; i < old_nb_queues; i++)
763                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
764                 rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
765                                 RTE_CACHE_LINE_SIZE);
766                 if (rxq == NULL)
767                         return -(ENOMEM);
768 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
769                 dev->post_rx_burst_cbs = rte_realloc(
770                         dev->post_rx_burst_cbs,
771                         sizeof(*dev->post_rx_burst_cbs) *
772                                 nb_queues, RTE_CACHE_LINE_SIZE);
773                 if (dev->post_rx_burst_cbs == NULL)
774                         return -ENOMEM;
775 #endif
776                 if (nb_queues > old_nb_queues) {
777                         uint16_t new_qs = nb_queues - old_nb_queues;
778                         memset(rxq + old_nb_queues, 0,
779                                 sizeof(rxq[0]) * new_qs);
780 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
781                         memset(dev->post_rx_burst_cbs + old_nb_queues, 0,
782                                 sizeof(dev->post_rx_burst_cbs[0]) * new_qs);
783 #endif
784                 }
785
786                 dev->data->rx_queues = rxq;
787
788         }
789         dev->data->nb_rx_queues = nb_queues;
790         return (0);
791 }
792
793 int
794 rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
795 {
796         struct rte_eth_dev *dev;
797
798         /* This function is only safe when called from the primary process
799          * in a multi-process setup*/
800         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
801
802         if (!rte_eth_dev_is_valid_port(port_id)) {
803                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
804                 return -EINVAL;
805         }
806
807         dev = &rte_eth_devices[port_id];
808         if (rx_queue_id >= dev->data->nb_rx_queues) {
809                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
810                 return -EINVAL;
811         }
812
813         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
814
815         return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
816
817 }
818
819 int
820 rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
821 {
822         struct rte_eth_dev *dev;
823
824         /* This function is only safe when called from the primary process
825          * in a multi-process setup*/
826         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
827
828         if (!rte_eth_dev_is_valid_port(port_id)) {
829                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
830                 return -EINVAL;
831         }
832
833         dev = &rte_eth_devices[port_id];
834         if (rx_queue_id >= dev->data->nb_rx_queues) {
835                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
836                 return -EINVAL;
837         }
838
839         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
840
841         return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
842
843 }
844
845 int
846 rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
847 {
848         struct rte_eth_dev *dev;
849
850         /* This function is only safe when called from the primary process
851          * in a multi-process setup*/
852         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
853
854         if (!rte_eth_dev_is_valid_port(port_id)) {
855                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
856                 return -EINVAL;
857         }
858
859         dev = &rte_eth_devices[port_id];
860         if (tx_queue_id >= dev->data->nb_tx_queues) {
861                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
862                 return -EINVAL;
863         }
864
865         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
866
867         return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
868
869 }
870
871 int
872 rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
873 {
874         struct rte_eth_dev *dev;
875
876         /* This function is only safe when called from the primary process
877          * in a multi-process setup*/
878         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
879
880         if (!rte_eth_dev_is_valid_port(port_id)) {
881                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
882                 return -EINVAL;
883         }
884
885         dev = &rte_eth_devices[port_id];
886         if (tx_queue_id >= dev->data->nb_tx_queues) {
887                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
888                 return -EINVAL;
889         }
890
891         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
892
893         return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
894
895 }
896
897 static int
898 rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
899 {
900         uint16_t old_nb_queues = dev->data->nb_tx_queues;
901         void **txq;
902         unsigned i;
903
904         if (dev->data->tx_queues == NULL) { /* first time configuration */
905                 dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
906                                 sizeof(dev->data->tx_queues[0]) * nb_queues,
907                                 RTE_CACHE_LINE_SIZE);
908                 if (dev->data->tx_queues == NULL) {
909                         dev->data->nb_tx_queues = 0;
910                         return -(ENOMEM);
911                 }
912 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
913                 dev->pre_tx_burst_cbs = rte_zmalloc(
914                         "ethdev->pre_tx_burst_cbs",
915                         sizeof(*dev->pre_tx_burst_cbs) * nb_queues,
916                         RTE_CACHE_LINE_SIZE);
917                 if (dev->pre_tx_burst_cbs == NULL) {
918                         rte_free(dev->data->tx_queues);
919                         dev->data->tx_queues = NULL;
920                         dev->data->nb_tx_queues = 0;
921                         return -ENOMEM;
922                 }
923 #endif
924
925         } else { /* re-configure */
926                 FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
927
928                 txq = dev->data->tx_queues;
929
930                 for (i = nb_queues; i < old_nb_queues; i++)
931                         (*dev->dev_ops->tx_queue_release)(txq[i]);
932                 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
933                                 RTE_CACHE_LINE_SIZE);
934                 if (txq == NULL)
935                         return -ENOMEM;
936 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
937                 dev->pre_tx_burst_cbs = rte_realloc(
938                         dev->pre_tx_burst_cbs,
939                         sizeof(*dev->pre_tx_burst_cbs) *
940                                 nb_queues, RTE_CACHE_LINE_SIZE);
941                 if (dev->pre_tx_burst_cbs == NULL)
942                         return -ENOMEM;
943 #endif
944                 if (nb_queues > old_nb_queues) {
945                         uint16_t new_qs = nb_queues - old_nb_queues;
946                         memset(txq + old_nb_queues, 0,
947                                 sizeof(txq[0]) * new_qs);
948 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
949                         memset(dev->pre_tx_burst_cbs + old_nb_queues, 0,
950                                 sizeof(dev->pre_tx_burst_cbs[0]) * new_qs);
951 #endif
952                 }
953
954                 dev->data->tx_queues = txq;
955
956         }
957         dev->data->nb_tx_queues = nb_queues;
958         return (0);
959 }
960
961 static int
962 rte_eth_dev_check_vf_rss_rxq_num(uint8_t port_id, uint16_t nb_rx_q)
963 {
964         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
965         switch (nb_rx_q) {
966         case 1:
967         case 2:
968                 RTE_ETH_DEV_SRIOV(dev).active =
969                         ETH_64_POOLS;
970                 break;
971         case 4:
972                 RTE_ETH_DEV_SRIOV(dev).active =
973                         ETH_32_POOLS;
974                 break;
975         default:
976                 return -EINVAL;
977         }
978
979         RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q;
980         RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
981                 dev->pci_dev->max_vfs * nb_rx_q;
982
983         return 0;
984 }
985
/*
 * Validate, and where necessary rewrite, the multi-queue (RSS/DCB/VMDQ)
 * mode requested in dev_conf against the device's SRIOV state and the
 * RX/TX queue counts.  May modify dev->data->dev_conf.[rt]xmode.mq_mode
 * and the RTE_ETH_DEV_SRIOV(dev) bookkeeping as side effects.
 * Returns 0 on success, -EINVAL on an unsupported or inconsistent
 * configuration.
 */
static int
rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                      const struct rte_eth_conf *dev_conf)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];

        if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
                /* check multi-queue mode */
                if ((dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) ||
                    (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB_RSS) ||
                    (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB)) {
                        /* SRIOV only works in VMDq enable mode */
                        PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
                                        " SRIOV active, "
                                        "wrong VMDQ mq_mode rx %u tx %u\n",
                                        port_id,
                                        dev_conf->rxmode.mq_mode,
                                        dev_conf->txmode.mq_mode);
                        return (-EINVAL);
                }

                switch (dev_conf->rxmode.mq_mode) {
                case ETH_MQ_RX_VMDQ_DCB:
                case ETH_MQ_RX_VMDQ_DCB_RSS:
                        /* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
                        PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
                                        " SRIOV active, "
                                        "unsupported VMDQ mq_mode rx %u\n",
                                        port_id, dev_conf->rxmode.mq_mode);
                        return (-EINVAL);
                case ETH_MQ_RX_RSS:
                        PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
                                        " SRIOV active, "
                                        "Rx mq mode is changed from:"
                                        "mq_mode %u into VMDQ mq_mode %u\n",
                                        port_id,
                                        dev_conf->rxmode.mq_mode,
                                        dev->data->dev_conf.rxmode.mq_mode);
                        /* fallthrough: plain RSS under SRIOV is remapped
                         * onto VMDQ+RSS below */
                case ETH_MQ_RX_VMDQ_RSS:
                        dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
                        if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
                                if (rte_eth_dev_check_vf_rss_rxq_num(port_id, nb_rx_q) != 0) {
                                        PMD_DEBUG_TRACE("ethdev port_id=%d"
                                                " SRIOV active, invalid queue"
                                                " number for VMDQ RSS, allowed"
                                                " value are 1, 2 or 4\n",
                                                port_id);
                                        return -EINVAL;
                                }
                        break;
                default: /* ETH_MQ_RX_VMDQ_ONLY or ETH_MQ_RX_NONE */
                        /* if nothing mq mode configure, use default scheme */
                        dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
                        if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
                                RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
                        break;
                }

                switch (dev_conf->txmode.mq_mode) {
                case ETH_MQ_TX_VMDQ_DCB:
                        /* DCB VMDQ in SRIOV mode, not implemented yet */
                        PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
                                        " SRIOV active, "
                                        "unsupported VMDQ mq_mode tx %u\n",
                                        port_id, dev_conf->txmode.mq_mode);
                        return (-EINVAL);
                default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
                        /* if nothing mq mode configure, use default scheme */
                        dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
                        break;
                }

                /* check valid queue number */
                if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
                    (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
                        PMD_DEBUG_TRACE("ethdev port_id=%d SRIOV active, "
                                    "queue number must less equal to %d\n",
                                        port_id, RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
                        return (-EINVAL);
                }
        } else {
                /* For vmdq+dcb mode check our configuration before we go further */
                if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
                        const struct rte_eth_vmdq_dcb_conf *conf;

                        if (nb_rx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_rx_q "
                                                "!= %d\n",
                                                port_id, ETH_VMDQ_DCB_NUM_QUEUES);
                                return (-EINVAL);
                        }
                        conf = &(dev_conf->rx_adv_conf.vmdq_dcb_conf);
                        if (! (conf->nb_queue_pools == ETH_16_POOLS ||
                               conf->nb_queue_pools == ETH_32_POOLS)) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
                                                "nb_queue_pools must be %d or %d\n",
                                                port_id, ETH_16_POOLS, ETH_32_POOLS);
                                return (-EINVAL);
                        }
                }
                if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
                        const struct rte_eth_vmdq_dcb_tx_conf *conf;

                        if (nb_tx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_tx_q "
                                                "!= %d\n",
                                                port_id, ETH_VMDQ_DCB_NUM_QUEUES);
                                return (-EINVAL);
                        }
                        conf = &(dev_conf->tx_adv_conf.vmdq_dcb_tx_conf);
                        if (! (conf->nb_queue_pools == ETH_16_POOLS ||
                               conf->nb_queue_pools == ETH_32_POOLS)) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
                                                "nb_queue_pools != %d or nb_queue_pools "
                                                "!= %d\n",
                                                port_id, ETH_16_POOLS, ETH_32_POOLS);
                                return (-EINVAL);
                        }
                }

                /* For DCB mode check our configuration before we go further */
                if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
                        const struct rte_eth_dcb_rx_conf *conf;

                        if (nb_rx_q != ETH_DCB_NUM_QUEUES) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_rx_q "
                                                "!= %d\n",
                                                port_id, ETH_DCB_NUM_QUEUES);
                                return (-EINVAL);
                        }
                        conf = &(dev_conf->rx_adv_conf.dcb_rx_conf);
                        if (! (conf->nb_tcs == ETH_4_TCS ||
                               conf->nb_tcs == ETH_8_TCS)) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
                                                "nb_tcs != %d or nb_tcs "
                                                "!= %d\n",
                                                port_id, ETH_4_TCS, ETH_8_TCS);
                                return (-EINVAL);
                        }
                }

                if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
                        const struct rte_eth_dcb_tx_conf *conf;

                        if (nb_tx_q != ETH_DCB_NUM_QUEUES) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_tx_q "
                                                "!= %d\n",
                                                port_id, ETH_DCB_NUM_QUEUES);
                                return (-EINVAL);
                        }
                        conf = &(dev_conf->tx_adv_conf.dcb_tx_conf);
                        if (! (conf->nb_tcs == ETH_4_TCS ||
                               conf->nb_tcs == ETH_8_TCS)) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
                                                "nb_tcs != %d or nb_tcs "
                                                "!= %d\n",
                                                port_id, ETH_4_TCS, ETH_8_TCS);
                                return (-EINVAL);
                        }
                }
        }
        return 0;
}
1149
/*
 * Configure an Ethernet device: validate the requested queue counts and
 * offload settings, copy dev_conf into the device data, allocate/resize the
 * RX and TX queue arrays and finally invoke the driver's dev_configure hook.
 * On failure the queue arrays are torn back down.  The port must be stopped.
 * Returns 0 on success or a negative errno-style value.
 */
int
rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                      const struct rte_eth_conf *dev_conf)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        int diag;

        /* This function is only safe when called from the primary process
         * in a multi-process setup*/
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        if (!rte_eth_dev_is_valid_port(port_id)) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-EINVAL);
        }

        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

        /* reconfiguring a running port is refused outright */
        if (dev->data->dev_started) {
                PMD_DEBUG_TRACE(
                    "port %d must be stopped to allow configuration\n", port_id);
                return (-EBUSY);
        }

        /*
         * Check that the numbers of RX and TX queues are not greater
         * than the maximum number of RX and TX queues supported by the
         * configured device.  Zero queues in either direction is also
         * rejected.
         */
        (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
        if (nb_rx_q > dev_info.max_rx_queues) {
                PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
                                port_id, nb_rx_q, dev_info.max_rx_queues);
                return (-EINVAL);
        }
        if (nb_rx_q == 0) {
                PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_q == 0\n", port_id);
                return (-EINVAL);
        }

        if (nb_tx_q > dev_info.max_tx_queues) {
                PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
                                port_id, nb_tx_q, dev_info.max_tx_queues);
                return (-EINVAL);
        }
        if (nb_tx_q == 0) {
                PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_q == 0\n", port_id);
                return (-EINVAL);
        }

        /* Copy the dev_conf parameter into the dev structure */
        memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

        /*
         * If link state interrupt is enabled, check that the
         * device supports it.
         */
        if (dev_conf->intr_conf.lsc == 1) {
                const struct rte_pci_driver *pci_drv = &dev->driver->pci_drv;

                if (!(pci_drv->drv_flags & RTE_PCI_DRV_INTR_LSC)) {
                        PMD_DEBUG_TRACE("driver %s does not support lsc\n",
                                        pci_drv->name);
                        return (-EINVAL);
                }
        }

        /*
         * If jumbo frames are enabled, check that the maximum RX packet
         * length is supported by the configured device.
         */
        if (dev_conf->rxmode.jumbo_frame == 1) {
                if (dev_conf->rxmode.max_rx_pkt_len >
                    dev_info.max_rx_pktlen) {
                        PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
                                " > max valid value %u\n",
                                port_id,
                                (unsigned)dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned)dev_info.max_rx_pktlen);
                        return (-EINVAL);
                }
                else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
                        PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
                                " < min valid value %u\n",
                                port_id,
                                (unsigned)dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned)ETHER_MIN_LEN);
                        return (-EINVAL);
                }
        } else {
                /* without jumbo frames an out-of-range length is silently
                 * replaced by the standard Ethernet maximum */
                if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
                        dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
                        /* Use default value */
                        dev->data->dev_conf.rxmode.max_rx_pkt_len =
                                                        ETHER_MAX_LEN;
        }

        /* multiple queue mode checking */
        diag = rte_eth_dev_check_mq_mode(port_id, nb_rx_q, nb_tx_q, dev_conf);
        if (diag != 0) {
                PMD_DEBUG_TRACE("port%d rte_eth_dev_check_mq_mode = %d\n",
                                port_id, diag);
                return diag;
        }

        /*
         * Setup new number of RX/TX queues and reconfigure device.
         * Each later failure unwinds the earlier queue allocations by
         * re-configuring them down to zero queues.
         */
        diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
        if (diag != 0) {
                PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
                                port_id, diag);
                return diag;
        }

        diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
        if (diag != 0) {
                PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
                                port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                return diag;
        }

        diag = (*dev->dev_ops->dev_configure)(dev);
        if (diag != 0) {
                PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
                                port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                rte_eth_dev_tx_queue_config(dev, 0);
                return diag;
        }

        return 0;
}
1288
/*
 * Re-apply the software-cached configuration (MAC addresses, promiscuous
 * and allmulticast flags) to the hardware.  Called from rte_eth_dev_start().
 */
static void
rte_eth_dev_config_restore(uint8_t port_id)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        struct ether_addr addr;
        uint16_t i;
        uint32_t pool = 0;

        dev = &rte_eth_devices[port_id];

        rte_eth_dev_info_get(port_id, &dev_info);

        /* with SRIOV active, addresses are replayed into the default
         * VMDQ pool instead of pool 0 */
        if (RTE_ETH_DEV_SRIOV(dev).active)
                pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx;

        /* replay MAC address configuration */
        for (i = 0; i < dev_info.max_mac_addrs; i++) {
                addr = dev->data->mac_addrs[i];

                /* skip zero address */
                if (is_zero_ether_addr(&addr))
                        continue;

                /* add address to the hardware; only entries whose pool
                 * selection mask includes the chosen pool are replayed */
                if  (*dev->dev_ops->mac_addr_add &&
                        (dev->data->mac_pool_sel[i] & (1ULL << pool)))
                        (*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool);
                else {
                        PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
                                        port_id);
                        /* exit the loop but not return an error */
                        break;
                }
        }

        /* replay promiscuous configuration */
        if (rte_eth_promiscuous_get(port_id) == 1)
                rte_eth_promiscuous_enable(port_id);
        else if (rte_eth_promiscuous_get(port_id) == 0)
                rte_eth_promiscuous_disable(port_id);

        /* replay allmulticast configuration */
        if (rte_eth_allmulticast_get(port_id) == 1)
                rte_eth_allmulticast_enable(port_id);
        else if (rte_eth_allmulticast_get(port_id) == 0)
                rte_eth_allmulticast_disable(port_id);
}
1337
1338 int
1339 rte_eth_dev_start(uint8_t port_id)
1340 {
1341         struct rte_eth_dev *dev;
1342         int diag;
1343
1344         /* This function is only safe when called from the primary process
1345          * in a multi-process setup*/
1346         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
1347
1348         if (!rte_eth_dev_is_valid_port(port_id)) {
1349                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1350                 return (-EINVAL);
1351         }
1352
1353         dev = &rte_eth_devices[port_id];
1354
1355         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1356
1357         if (dev->data->dev_started != 0) {
1358                 PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
1359                         " already started\n",
1360                         port_id);
1361                 return (0);
1362         }
1363
1364         diag = (*dev->dev_ops->dev_start)(dev);
1365         if (diag == 0)
1366                 dev->data->dev_started = 1;
1367         else
1368                 return diag;
1369
1370         rte_eth_dev_config_restore(port_id);
1371
1372         if (dev->data->dev_conf.intr_conf.lsc != 0) {
1373                 FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
1374                 (*dev->dev_ops->link_update)(dev, 0);
1375         }
1376         return 0;
1377 }
1378
1379 void
1380 rte_eth_dev_stop(uint8_t port_id)
1381 {
1382         struct rte_eth_dev *dev;
1383
1384         /* This function is only safe when called from the primary process
1385          * in a multi-process setup*/
1386         PROC_PRIMARY_OR_RET();
1387
1388         if (!rte_eth_dev_is_valid_port(port_id)) {
1389                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1390                 return;
1391         }
1392
1393         dev = &rte_eth_devices[port_id];
1394
1395         FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1396
1397         if (dev->data->dev_started == 0) {
1398                 PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
1399                         " already stopped\n",
1400                         port_id);
1401                 return;
1402         }
1403
1404         dev->data->dev_started = 0;
1405         (*dev->dev_ops->dev_stop)(dev);
1406 }
1407
1408 int
1409 rte_eth_dev_set_link_up(uint8_t port_id)
1410 {
1411         struct rte_eth_dev *dev;
1412
1413         /* This function is only safe when called from the primary process
1414          * in a multi-process setup*/
1415         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
1416
1417         if (!rte_eth_dev_is_valid_port(port_id)) {
1418                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1419                 return -EINVAL;
1420         }
1421
1422         dev = &rte_eth_devices[port_id];
1423
1424         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1425         return (*dev->dev_ops->dev_set_link_up)(dev);
1426 }
1427
1428 int
1429 rte_eth_dev_set_link_down(uint8_t port_id)
1430 {
1431         struct rte_eth_dev *dev;
1432
1433         /* This function is only safe when called from the primary process
1434          * in a multi-process setup*/
1435         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
1436
1437         if (!rte_eth_dev_is_valid_port(port_id)) {
1438                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1439                 return -EINVAL;
1440         }
1441
1442         dev = &rte_eth_devices[port_id];
1443
1444         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1445         return (*dev->dev_ops->dev_set_link_down)(dev);
1446 }
1447
1448 void
1449 rte_eth_dev_close(uint8_t port_id)
1450 {
1451         struct rte_eth_dev *dev;
1452
1453         /* This function is only safe when called from the primary process
1454          * in a multi-process setup*/
1455         PROC_PRIMARY_OR_RET();
1456
1457         if (!rte_eth_dev_is_valid_port(port_id)) {
1458                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1459                 return;
1460         }
1461
1462         dev = &rte_eth_devices[port_id];
1463
1464         FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
1465         dev->data->dev_started = 0;
1466         (*dev->dev_ops->dev_close)(dev);
1467 }
1468
/*
 * Set up an RX queue: validate the port/queue, verify that the supplied
 * mempool carries pktmbuf private data and that its buffers are large
 * enough for the device, then invoke the driver's rx_queue_setup hook.
 * The port must be stopped.  Returns 0 on success or a negative
 * errno-style value (-EINVAL, -EBUSY, -ENOTSUP, -ENOSPC or a driver code).
 */
int
rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
                       uint16_t nb_rx_desc, unsigned int socket_id,
                       const struct rte_eth_rxconf *rx_conf,
                       struct rte_mempool *mp)
{
        int ret;
        uint32_t mbp_buf_size;
        struct rte_eth_dev *dev;
        struct rte_pktmbuf_pool_private *mbp_priv;
        struct rte_eth_dev_info dev_info;

        /* This function is only safe when called from the primary process
         * in a multi-process setup*/
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        if (!rte_eth_dev_is_valid_port(port_id)) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-EINVAL);
        }

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
                return (-EINVAL);
        }

        /* queue setup on a running port is refused */
        if (dev->data->dev_started) {
                PMD_DEBUG_TRACE(
                    "port %d must be stopped to allow configuration\n", port_id);
                return -EBUSY;
        }

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);

        /*
         * Check the size of the mbuf data buffer.
         * This value must be provided in the private data of the memory pool.
         * First check that the memory pool has a valid private data.
         */
        rte_eth_dev_info_get(port_id, &dev_info);
        if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
                PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
                                mp->name, (int) mp->private_data_size,
                                (int) sizeof(struct rte_pktmbuf_pool_private));
                return (-ENOSPC);
        }
        mbp_priv = rte_mempool_get_priv(mp);
        mbp_buf_size = mbp_priv->mbuf_data_room_size;

        /* the data room must cover the mbuf headroom plus the device's
         * minimum RX buffer size */
        if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
                PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
                                "(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
                                "=%d)\n",
                                mp->name,
                                (int)mbp_buf_size,
                                (int)(RTE_PKTMBUF_HEADROOM +
                                      dev_info.min_rx_bufsize),
                                (int)RTE_PKTMBUF_HEADROOM,
                                (int)dev_info.min_rx_bufsize);
                return (-EINVAL);
        }

        /* a NULL rx_conf selects the driver's defaults */
        if (rx_conf == NULL)
                rx_conf = &dev_info.default_rxconf;

        ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
                                              socket_id, rx_conf, mp);
        if (!ret) {
                /* on success, track the smallest RX buffer size seen
                 * across all configured RX queues of the device */
                if (!dev->data->min_rx_buf_size ||
                    dev->data->min_rx_buf_size > mbp_buf_size)
                        dev->data->min_rx_buf_size = mbp_buf_size;
        }

        return ret;
}
1546
1547 int
1548 rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
1549                        uint16_t nb_tx_desc, unsigned int socket_id,
1550                        const struct rte_eth_txconf *tx_conf)
1551 {
1552         struct rte_eth_dev *dev;
1553         struct rte_eth_dev_info dev_info;
1554
1555         /* This function is only safe when called from the primary process
1556          * in a multi-process setup*/
1557         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
1558
1559         if (!rte_eth_dev_is_valid_port(port_id)) {
1560                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1561                 return (-EINVAL);
1562         }
1563
1564         dev = &rte_eth_devices[port_id];
1565         if (tx_queue_id >= dev->data->nb_tx_queues) {
1566                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
1567                 return (-EINVAL);
1568         }
1569
1570         if (dev->data->dev_started) {
1571                 PMD_DEBUG_TRACE(
1572                     "port %d must be stopped to allow configuration\n", port_id);
1573                 return -EBUSY;
1574         }
1575
1576         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1577         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
1578
1579         rte_eth_dev_info_get(port_id, &dev_info);
1580
1581         if (tx_conf == NULL)
1582                 tx_conf = &dev_info.default_txconf;
1583
1584         return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
1585                                                socket_id, tx_conf);
1586 }
1587
1588 void
1589 rte_eth_promiscuous_enable(uint8_t port_id)
1590 {
1591         struct rte_eth_dev *dev;
1592
1593         if (!rte_eth_dev_is_valid_port(port_id)) {
1594                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1595                 return;
1596         }
1597
1598         dev = &rte_eth_devices[port_id];
1599
1600         FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
1601         (*dev->dev_ops->promiscuous_enable)(dev);
1602         dev->data->promiscuous = 1;
1603 }
1604
1605 void
1606 rte_eth_promiscuous_disable(uint8_t port_id)
1607 {
1608         struct rte_eth_dev *dev;
1609
1610         if (!rte_eth_dev_is_valid_port(port_id)) {
1611                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1612                 return;
1613         }
1614
1615         dev = &rte_eth_devices[port_id];
1616
1617         FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
1618         dev->data->promiscuous = 0;
1619         (*dev->dev_ops->promiscuous_disable)(dev);
1620 }
1621
1622 int
1623 rte_eth_promiscuous_get(uint8_t port_id)
1624 {
1625         struct rte_eth_dev *dev;
1626
1627         if (!rte_eth_dev_is_valid_port(port_id)) {
1628                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1629                 return -1;
1630         }
1631
1632         dev = &rte_eth_devices[port_id];
1633         return dev->data->promiscuous;
1634 }
1635
1636 void
1637 rte_eth_allmulticast_enable(uint8_t port_id)
1638 {
1639         struct rte_eth_dev *dev;
1640
1641         if (!rte_eth_dev_is_valid_port(port_id)) {
1642                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1643                 return;
1644         }
1645
1646         dev = &rte_eth_devices[port_id];
1647
1648         FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
1649         (*dev->dev_ops->allmulticast_enable)(dev);
1650         dev->data->all_multicast = 1;
1651 }
1652
1653 void
1654 rte_eth_allmulticast_disable(uint8_t port_id)
1655 {
1656         struct rte_eth_dev *dev;
1657
1658         if (!rte_eth_dev_is_valid_port(port_id)) {
1659                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1660                 return;
1661         }
1662
1663         dev = &rte_eth_devices[port_id];
1664
1665         FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
1666         dev->data->all_multicast = 0;
1667         (*dev->dev_ops->allmulticast_disable)(dev);
1668 }
1669
1670 int
1671 rte_eth_allmulticast_get(uint8_t port_id)
1672 {
1673         struct rte_eth_dev *dev;
1674
1675         if (!rte_eth_dev_is_valid_port(port_id)) {
1676                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1677                 return -1;
1678         }
1679
1680         dev = &rte_eth_devices[port_id];
1681         return dev->data->all_multicast;
1682 }
1683
/*
 * Atomically snapshot dev->data->dev_link (which fits in 64 bits) into
 * *link. The cmpset compares *dst against its own current value — which
 * always matches unless a concurrent writer changes *dst between the
 * read of the "expected" argument and the compare — and on success
 * stores the current value of *src.
 *
 * Returns 0 on success, -1 if a racing update was detected (the caller
 * may retry; callers in this file currently ignore the result).
 */
static inline int
rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	/* NOTE: relies on struct rte_eth_link being exactly 64 bits. */
	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
1697
1698 void
1699 rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
1700 {
1701         struct rte_eth_dev *dev;
1702
1703         if (!rte_eth_dev_is_valid_port(port_id)) {
1704                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1705                 return;
1706         }
1707
1708         dev = &rte_eth_devices[port_id];
1709
1710         if (dev->data->dev_conf.intr_conf.lsc != 0)
1711                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1712         else {
1713                 FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1714                 (*dev->dev_ops->link_update)(dev, 1);
1715                 *eth_link = dev->data->dev_link;
1716         }
1717 }
1718
1719 void
1720 rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
1721 {
1722         struct rte_eth_dev *dev;
1723
1724         if (!rte_eth_dev_is_valid_port(port_id)) {
1725                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1726                 return;
1727         }
1728
1729         dev = &rte_eth_devices[port_id];
1730
1731         if (dev->data->dev_conf.intr_conf.lsc != 0)
1732                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1733         else {
1734                 FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1735                 (*dev->dev_ops->link_update)(dev, 0);
1736                 *eth_link = dev->data->dev_link;
1737         }
1738 }
1739
1740 int
1741 rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
1742 {
1743         struct rte_eth_dev *dev;
1744
1745         if (!rte_eth_dev_is_valid_port(port_id)) {
1746                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1747                 return (-ENODEV);
1748         }
1749
1750         dev = &rte_eth_devices[port_id];
1751         memset(stats, 0, sizeof(*stats));
1752
1753         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1754         (*dev->dev_ops->stats_get)(dev, stats);
1755         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1756         return 0;
1757 }
1758
1759 void
1760 rte_eth_stats_reset(uint8_t port_id)
1761 {
1762         struct rte_eth_dev *dev;
1763
1764         if (!rte_eth_dev_is_valid_port(port_id)) {
1765                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1766                 return;
1767         }
1768
1769         dev = &rte_eth_devices[port_id];
1770
1771         FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
1772         (*dev->dev_ops->stats_reset)(dev);
1773 }
1774
/* retrieve ethdev extended statistics
 *
 * Fills up to @n entries of @xstats. If the PMD implements xstats_get,
 * the call is forwarded verbatim. Otherwise the generic rte_eth_stats
 * fields are exposed as xstats: global counters first, then per-RX-queue
 * and per-TX-queue counters.
 *
 * Returns the number of entries written, the required count when @n is
 * too small, or -1 on an invalid port id.
 */
int
rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstats *xstats,
	unsigned n)
{
	struct rte_eth_stats eth_stats;
	struct rte_eth_dev *dev;
	unsigned count, i, q;
	uint64_t val;
	char *stats_ptr;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return -1;
	}

	dev = &rte_eth_devices[port_id];

	/* implemented by the driver */
	if (dev->dev_ops->xstats_get != NULL)
		return (*dev->dev_ops->xstats_get)(dev, xstats, n);

	/* else, return generic statistics */
	count = RTE_NB_STATS;
	count += dev->data->nb_rx_queues * RTE_NB_RXQ_STATS;
	count += dev->data->nb_tx_queues * RTE_NB_TXQ_STATS;
	if (n < count)
		/* Caller's array is too small: report the required size. */
		return count;

	/* now fill the xstats structure */

	count = 0;
	memset(&eth_stats, 0, sizeof(eth_stats));
	rte_eth_stats_get(port_id, &eth_stats);

	/* global stats */
	for (i = 0; i < RTE_NB_STATS; i++) {
		/* Each table entry stores the byte offset of a uint64_t
		 * field inside struct rte_eth_stats. */
		stats_ptr = (char *)&eth_stats + rte_stats_strings[i].offset;
		val = *(uint64_t *)stats_ptr;
		snprintf(xstats[count].name, sizeof(xstats[count].name),
			"%s", rte_stats_strings[i].name);
		xstats[count++].value = val;
	}

	/* per-rxq stats */
	for (q = 0; q < dev->data->nb_rx_queues; q++) {
		for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
			/* Per-queue fields are arrays of uint64_t indexed
			 * by queue id, hence the extra q-sized step. */
			stats_ptr = (char *)&eth_stats;
			stats_ptr += rte_rxq_stats_strings[i].offset;
			stats_ptr += q * sizeof(uint64_t);
			val = *(uint64_t *)stats_ptr;
			snprintf(xstats[count].name, sizeof(xstats[count].name),
				"rx_queue_%u_%s", q,
				rte_rxq_stats_strings[i].name);
			xstats[count++].value = val;
		}
	}

	/* per-txq stats */
	for (q = 0; q < dev->data->nb_tx_queues; q++) {
		for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
			stats_ptr = (char *)&eth_stats;
			stats_ptr += rte_txq_stats_strings[i].offset;
			stats_ptr += q * sizeof(uint64_t);
			val = *(uint64_t *)stats_ptr;
			snprintf(xstats[count].name, sizeof(xstats[count].name),
				"tx_queue_%u_%s", q,
				rte_txq_stats_strings[i].name);
			xstats[count++].value = val;
		}
	}

	return count;
}
1849
1850 /* reset ethdev extended statistics */
1851 void
1852 rte_eth_xstats_reset(uint8_t port_id)
1853 {
1854         struct rte_eth_dev *dev;
1855
1856         if (!rte_eth_dev_is_valid_port(port_id)) {
1857                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1858                 return;
1859         }
1860
1861         dev = &rte_eth_devices[port_id];
1862
1863         /* implemented by the driver */
1864         if (dev->dev_ops->xstats_reset != NULL) {
1865                 (*dev->dev_ops->xstats_reset)(dev);
1866                 return;
1867         }
1868
1869         /* fallback to default */
1870         rte_eth_stats_reset(port_id);
1871 }
1872
1873 static int
1874 set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
1875                 uint8_t is_rx)
1876 {
1877         struct rte_eth_dev *dev;
1878
1879         if (!rte_eth_dev_is_valid_port(port_id)) {
1880                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1881                 return -ENODEV;
1882         }
1883
1884         dev = &rte_eth_devices[port_id];
1885
1886         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
1887         return (*dev->dev_ops->queue_stats_mapping_set)
1888                         (dev, queue_id, stat_idx, is_rx);
1889 }
1890
1891
1892 int
1893 rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
1894                 uint8_t stat_idx)
1895 {
1896         return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
1897                         STAT_QMAP_TX);
1898 }
1899
1900
1901 int
1902 rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
1903                 uint8_t stat_idx)
1904 {
1905         return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
1906                         STAT_QMAP_RX);
1907 }
1908
1909
1910 void
1911 rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
1912 {
1913         struct rte_eth_dev *dev;
1914
1915         if (!rte_eth_dev_is_valid_port(port_id)) {
1916                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1917                 return;
1918         }
1919
1920         dev = &rte_eth_devices[port_id];
1921
1922         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
1923
1924         FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
1925         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
1926         dev_info->pci_dev = dev->pci_dev;
1927         if (dev->driver)
1928                 dev_info->driver_name = dev->driver->pci_drv.name;
1929 }
1930
1931 void
1932 rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
1933 {
1934         struct rte_eth_dev *dev;
1935
1936         if (!rte_eth_dev_is_valid_port(port_id)) {
1937                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1938                 return;
1939         }
1940
1941         dev = &rte_eth_devices[port_id];
1942         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
1943 }
1944
1945
1946 int
1947 rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu)
1948 {
1949         struct rte_eth_dev *dev;
1950
1951         if (!rte_eth_dev_is_valid_port(port_id)) {
1952                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1953                 return (-ENODEV);
1954         }
1955
1956         dev = &rte_eth_devices[port_id];
1957         *mtu = dev->data->mtu;
1958         return 0;
1959 }
1960
1961 int
1962 rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu)
1963 {
1964         int ret;
1965         struct rte_eth_dev *dev;
1966
1967         if (!rte_eth_dev_is_valid_port(port_id)) {
1968                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1969                 return (-ENODEV);
1970         }
1971
1972         dev = &rte_eth_devices[port_id];
1973         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
1974
1975         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
1976         if (!ret)
1977                 dev->data->mtu = mtu;
1978
1979         return ret;
1980 }
1981
1982 int
1983 rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
1984 {
1985         struct rte_eth_dev *dev;
1986
1987         if (!rte_eth_dev_is_valid_port(port_id)) {
1988                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1989                 return (-ENODEV);
1990         }
1991
1992         dev = &rte_eth_devices[port_id];
1993         if (! (dev->data->dev_conf.rxmode.hw_vlan_filter)) {
1994                 PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
1995                 return (-ENOSYS);
1996         }
1997
1998         if (vlan_id > 4095) {
1999                 PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
2000                                 port_id, (unsigned) vlan_id);
2001                 return (-EINVAL);
2002         }
2003         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
2004
2005         return (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
2006 }
2007
2008 int
2009 rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on)
2010 {
2011         struct rte_eth_dev *dev;
2012
2013         if (!rte_eth_dev_is_valid_port(port_id)) {
2014                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2015                 return (-ENODEV);
2016         }
2017
2018         dev = &rte_eth_devices[port_id];
2019         if (rx_queue_id >= dev->data->nb_rx_queues) {
2020                 PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", port_id);
2021                 return (-EINVAL);
2022         }
2023
2024         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
2025         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
2026
2027         return (0);
2028 }
2029
2030 int
2031 rte_eth_dev_set_vlan_ether_type(uint8_t port_id, uint16_t tpid)
2032 {
2033         struct rte_eth_dev *dev;
2034
2035         if (!rte_eth_dev_is_valid_port(port_id)) {
2036                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2037                 return (-ENODEV);
2038         }
2039
2040         dev = &rte_eth_devices[port_id];
2041         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
2042         (*dev->dev_ops->vlan_tpid_set)(dev, tpid);
2043
2044         return (0);
2045 }
2046
/*
 * Apply a set of VLAN offload flags (strip/filter/extend) to a port.
 *
 * For each offload, the requested state in @offload_mask is compared
 * against the cached device configuration; only offloads whose state
 * actually changes are included in the mask passed to the PMD. The
 * cached configuration is updated *before* the PMD call.
 *
 * Returns 0 on success (including "nothing changed"), -ENODEV on a bad
 * port, -ENOTSUP when the PMD lacks vlan_offload_set.
 */
int
rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
{
	struct rte_eth_dev *dev;
	int ret = 0;
	int mask = 0;
	int cur, org = 0;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-ENODEV);
	}

	dev = &rte_eth_devices[port_id];

	/*check which option changed by application*/
	cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
	org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
	if (cur != org){
		dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
		mask |= ETH_VLAN_STRIP_MASK;
	}

	cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
	org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
	if (cur != org){
		dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
		mask |= ETH_VLAN_FILTER_MASK;
	}

	cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
	org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
	if (cur != org){
		dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
		mask |= ETH_VLAN_EXTEND_MASK;
	}

	/*no change*/
	if(mask == 0)
		return ret;

	/* NOTE(review): the cached config was already updated above; if the
	 * PMD call fails the cache and hardware may disagree. */
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
	(*dev->dev_ops->vlan_offload_set)(dev, mask);

	return ret;
}
2093
2094 int
2095 rte_eth_dev_get_vlan_offload(uint8_t port_id)
2096 {
2097         struct rte_eth_dev *dev;
2098         int ret = 0;
2099
2100         if (!rte_eth_dev_is_valid_port(port_id)) {
2101                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2102                 return (-ENODEV);
2103         }
2104
2105         dev = &rte_eth_devices[port_id];
2106
2107         if (dev->data->dev_conf.rxmode.hw_vlan_strip)
2108                 ret |= ETH_VLAN_STRIP_OFFLOAD ;
2109
2110         if (dev->data->dev_conf.rxmode.hw_vlan_filter)
2111                 ret |= ETH_VLAN_FILTER_OFFLOAD ;
2112
2113         if (dev->data->dev_conf.rxmode.hw_vlan_extend)
2114                 ret |= ETH_VLAN_EXTEND_OFFLOAD ;
2115
2116         return ret;
2117 }
2118
2119 int
2120 rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on)
2121 {
2122         struct rte_eth_dev *dev;
2123
2124         if (!rte_eth_dev_is_valid_port(port_id)) {
2125                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2126                 return (-ENODEV);
2127         }
2128
2129         dev = &rte_eth_devices[port_id];
2130         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
2131         (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on);
2132
2133         return 0;
2134 }
2135
/*
 * Add a flow-director signature filter steering matching packets to
 * @queue. The port must have been configured in RTE_FDIR_MODE_SIGNATURE.
 * Returns 0/PMD result, -ENODEV on a bad port, -ENOSYS on a wrong FDIR
 * mode, -EINVAL on inconsistent L4 fields, -ENOTSUP without PMD support.
 */
int
rte_eth_dev_fdir_add_signature_filter(uint8_t port_id,
				      struct rte_fdir_filter *fdir_filter,
				      uint8_t queue)
{
	struct rte_eth_dev *dev;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-ENODEV);
	}

	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
		PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
				port_id, dev->data->dev_conf.fdir_conf.mode);
		return (-ENOSYS);
	}

	/* SCTP and "none" L4 types carry no port numbers, so the filter
	 * must not specify any. */
	if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
	     || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
	    && (fdir_filter->port_src || fdir_filter->port_dst)) {
		PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
				"None l4type, source & destinations ports " \
				"should be null!\n");
		return (-EINVAL);
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_signature_filter, -ENOTSUP);
	return (*dev->dev_ops->fdir_add_signature_filter)(dev, fdir_filter,
								queue);
}
2169
/*
 * Update an existing flow-director signature filter to steer matching
 * packets to @queue. Same preconditions and error codes as
 * rte_eth_dev_fdir_add_signature_filter().
 */
int
rte_eth_dev_fdir_update_signature_filter(uint8_t port_id,
					 struct rte_fdir_filter *fdir_filter,
					 uint8_t queue)
{
	struct rte_eth_dev *dev;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-ENODEV);
	}

	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
		PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
				port_id, dev->data->dev_conf.fdir_conf.mode);
		return (-ENOSYS);
	}

	/* SCTP and "none" L4 types carry no port numbers. */
	if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
	     || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
	    && (fdir_filter->port_src || fdir_filter->port_dst)) {
		PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
				"None l4type, source & destinations ports " \
				"should be null!\n");
		return (-EINVAL);
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_signature_filter, -ENOTSUP);
	return (*dev->dev_ops->fdir_update_signature_filter)(dev, fdir_filter,
								queue);

}
2204
/*
 * Remove a flow-director signature filter. Same preconditions and
 * error codes as rte_eth_dev_fdir_add_signature_filter().
 */
int
rte_eth_dev_fdir_remove_signature_filter(uint8_t port_id,
					 struct rte_fdir_filter *fdir_filter)
{
	struct rte_eth_dev *dev;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-ENODEV);
	}

	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
		PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
				port_id, dev->data->dev_conf.fdir_conf.mode);
		return (-ENOSYS);
	}

	/* SCTP and "none" L4 types carry no port numbers. */
	if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
	     || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
	    && (fdir_filter->port_src || fdir_filter->port_dst)) {
		PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
				"None l4type source & destinations ports " \
				"should be null!\n");
		return (-EINVAL);
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_signature_filter, -ENOTSUP);
	return (*dev->dev_ops->fdir_remove_signature_filter)(dev, fdir_filter);
}
2236
2237 int
2238 rte_eth_dev_fdir_get_infos(uint8_t port_id, struct rte_eth_fdir *fdir)
2239 {
2240         struct rte_eth_dev *dev;
2241
2242         if (!rte_eth_dev_is_valid_port(port_id)) {
2243                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2244                 return (-ENODEV);
2245         }
2246
2247         dev = &rte_eth_devices[port_id];
2248         if (! (dev->data->dev_conf.fdir_conf.mode)) {
2249                 PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
2250                 return (-ENOSYS);
2251         }
2252
2253         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_infos_get, -ENOTSUP);
2254
2255         (*dev->dev_ops->fdir_infos_get)(dev, fdir);
2256         return (0);
2257 }
2258
/*
 * Add a flow-director perfect filter; matching packets are steered to
 * @queue, or dropped when @drop is set. The port must have been
 * configured in RTE_FDIR_MODE_PERFECT and the filter must be IPv4.
 * Returns 0/PMD result, -ENODEV on a bad port, -ENOSYS on a wrong FDIR
 * mode, -EINVAL on inconsistent L4 fields, -ENOTSUP otherwise.
 */
int
rte_eth_dev_fdir_add_perfect_filter(uint8_t port_id,
				    struct rte_fdir_filter *fdir_filter,
				    uint16_t soft_id, uint8_t queue,
				    uint8_t drop)
{
	struct rte_eth_dev *dev;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-ENODEV);
	}

	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
		PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
				port_id, dev->data->dev_conf.fdir_conf.mode);
		return (-ENOSYS);
	}

	/* SCTP and "none" L4 types carry no port numbers. */
	if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
	     || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
	    && (fdir_filter->port_src || fdir_filter->port_dst)) {
		PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
				"None l4type, source & destinations ports " \
				"should be null!\n");
		return (-EINVAL);
	}

	/* For now IPv6 is not supported with perfect filter */
	if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
		return (-ENOTSUP);

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_perfect_filter, -ENOTSUP);
	return (*dev->dev_ops->fdir_add_perfect_filter)(dev, fdir_filter,
								soft_id, queue,
								drop);
}
2298
/*
 * Update an existing flow-director perfect filter identified by
 * @soft_id. Same preconditions and error codes as
 * rte_eth_dev_fdir_add_perfect_filter().
 */
int
rte_eth_dev_fdir_update_perfect_filter(uint8_t port_id,
				       struct rte_fdir_filter *fdir_filter,
				       uint16_t soft_id, uint8_t queue,
				       uint8_t drop)
{
	struct rte_eth_dev *dev;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-ENODEV);
	}

	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
		PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
				port_id, dev->data->dev_conf.fdir_conf.mode);
		return (-ENOSYS);
	}

	/* SCTP and "none" L4 types carry no port numbers. */
	if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
	     || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
	    && (fdir_filter->port_src || fdir_filter->port_dst)) {
		PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
				"None l4type, source & destinations ports " \
				"should be null!\n");
		return (-EINVAL);
	}

	/* For now IPv6 is not supported with perfect filter */
	if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
		return (-ENOTSUP);

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_perfect_filter, -ENOTSUP);
	return (*dev->dev_ops->fdir_update_perfect_filter)(dev, fdir_filter,
							soft_id, queue, drop);
}
2337
/*
 * Remove the flow-director perfect filter identified by @soft_id.
 * Same preconditions and error codes as
 * rte_eth_dev_fdir_add_perfect_filter().
 */
int
rte_eth_dev_fdir_remove_perfect_filter(uint8_t port_id,
				       struct rte_fdir_filter *fdir_filter,
				       uint16_t soft_id)
{
	struct rte_eth_dev *dev;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-ENODEV);
	}

	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
		PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
				port_id, dev->data->dev_conf.fdir_conf.mode);
		return (-ENOSYS);
	}

	/* SCTP and "none" L4 types carry no port numbers. */
	if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
	     || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
	    && (fdir_filter->port_src || fdir_filter->port_dst)) {
		PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
				"None l4type, source & destinations ports " \
				"should be null!\n");
		return (-EINVAL);
	}

	/* For now IPv6 is not supported with perfect filter */
	if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
		return (-ENOTSUP);

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_perfect_filter, -ENOTSUP);
	return (*dev->dev_ops->fdir_remove_perfect_filter)(dev, fdir_filter,
								soft_id);
}
2375
2376 int
2377 rte_eth_dev_fdir_set_masks(uint8_t port_id, struct rte_fdir_masks *fdir_mask)
2378 {
2379         struct rte_eth_dev *dev;
2380
2381         if (!rte_eth_dev_is_valid_port(port_id)) {
2382                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2383                 return (-ENODEV);
2384         }
2385
2386         dev = &rte_eth_devices[port_id];
2387         if (! (dev->data->dev_conf.fdir_conf.mode)) {
2388                 PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
2389                 return (-ENOSYS);
2390         }
2391
2392         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_set_masks, -ENOTSUP);
2393         return (*dev->dev_ops->fdir_set_masks)(dev, fdir_mask);
2394 }
2395
2396 int
2397 rte_eth_dev_flow_ctrl_get(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
2398 {
2399         struct rte_eth_dev *dev;
2400
2401         if (!rte_eth_dev_is_valid_port(port_id)) {
2402                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2403                 return (-ENODEV);
2404         }
2405
2406         dev = &rte_eth_devices[port_id];
2407         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
2408         memset(fc_conf, 0, sizeof(*fc_conf));
2409         return (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf);
2410 }
2411
2412 int
2413 rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
2414 {
2415         struct rte_eth_dev *dev;
2416
2417         if (!rte_eth_dev_is_valid_port(port_id)) {
2418                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2419                 return (-ENODEV);
2420         }
2421
2422         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
2423                 PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
2424                 return (-EINVAL);
2425         }
2426
2427         dev = &rte_eth_devices[port_id];
2428         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
2429         return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
2430 }
2431
2432 int
2433 rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc_conf)
2434 {
2435         struct rte_eth_dev *dev;
2436
2437         if (!rte_eth_dev_is_valid_port(port_id)) {
2438                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2439                 return (-ENODEV);
2440         }
2441
2442         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
2443                 PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
2444                 return (-EINVAL);
2445         }
2446
2447         dev = &rte_eth_devices[port_id];
2448         /* High water, low water validation are device specific */
2449         if  (*dev->dev_ops->priority_flow_ctrl_set)
2450                 return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
2451         return (-ENOTSUP);
2452 }
2453
2454 static inline int
2455 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
2456                         uint16_t reta_size)
2457 {
2458         uint16_t i, num;
2459
2460         if (!reta_conf)
2461                 return -EINVAL;
2462
2463         if (reta_size != RTE_ALIGN(reta_size, RTE_RETA_GROUP_SIZE)) {
2464                 PMD_DEBUG_TRACE("Invalid reta size, should be %u aligned\n",
2465                                                         RTE_RETA_GROUP_SIZE);
2466                 return -EINVAL;
2467         }
2468
2469         num = reta_size / RTE_RETA_GROUP_SIZE;
2470         for (i = 0; i < num; i++) {
2471                 if (reta_conf[i].mask)
2472                         return 0;
2473         }
2474
2475         return -EINVAL;
2476 }
2477
2478 static inline int
2479 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
2480                          uint16_t reta_size,
2481                          uint8_t max_rxq)
2482 {
2483         uint16_t i, idx, shift;
2484
2485         if (!reta_conf)
2486                 return -EINVAL;
2487
2488         if (max_rxq == 0) {
2489                 PMD_DEBUG_TRACE("No receive queue is available\n");
2490                 return -EINVAL;
2491         }
2492
2493         for (i = 0; i < reta_size; i++) {
2494                 idx = i / RTE_RETA_GROUP_SIZE;
2495                 shift = i % RTE_RETA_GROUP_SIZE;
2496                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
2497                         (reta_conf[idx].reta[shift] >= max_rxq)) {
2498                         PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
2499                                 "the maximum rxq index: %u\n", idx, shift,
2500                                 reta_conf[idx].reta[shift], max_rxq);
2501                         return -EINVAL;
2502                 }
2503         }
2504
2505         return 0;
2506 }
2507
2508 int
2509 rte_eth_dev_rss_reta_update(uint8_t port_id,
2510                             struct rte_eth_rss_reta_entry64 *reta_conf,
2511                             uint16_t reta_size)
2512 {
2513         struct rte_eth_dev *dev;
2514         int ret;
2515
2516         if (!rte_eth_dev_is_valid_port(port_id)) {
2517                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2518                 return -ENODEV;
2519         }
2520
2521         /* Check mask bits */
2522         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2523         if (ret < 0)
2524                 return ret;
2525
2526         dev = &rte_eth_devices[port_id];
2527
2528         /* Check entry value */
2529         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
2530                                 dev->data->nb_rx_queues);
2531         if (ret < 0)
2532                 return ret;
2533
2534         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
2535         return (*dev->dev_ops->reta_update)(dev, reta_conf, reta_size);
2536 }
2537
2538 int
2539 rte_eth_dev_rss_reta_query(uint8_t port_id,
2540                            struct rte_eth_rss_reta_entry64 *reta_conf,
2541                            uint16_t reta_size)
2542 {
2543         struct rte_eth_dev *dev;
2544         int ret;
2545
2546         if (port_id >= nb_ports) {
2547                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2548                 return -ENODEV;
2549         }
2550
2551         /* Check mask bits */
2552         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2553         if (ret < 0)
2554                 return ret;
2555
2556         dev = &rte_eth_devices[port_id];
2557         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
2558         return (*dev->dev_ops->reta_query)(dev, reta_conf, reta_size);
2559 }
2560
2561 int
2562 rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf)
2563 {
2564         struct rte_eth_dev *dev;
2565         uint16_t rss_hash_protos;
2566
2567         if (!rte_eth_dev_is_valid_port(port_id)) {
2568                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2569                 return (-ENODEV);
2570         }
2571
2572         rss_hash_protos = rss_conf->rss_hf;
2573         if ((rss_hash_protos != 0) &&
2574             ((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) {
2575                 PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%x\n",
2576                                 rss_hash_protos);
2577                 return (-EINVAL);
2578         }
2579         dev = &rte_eth_devices[port_id];
2580         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
2581         return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
2582 }
2583
2584 int
2585 rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
2586                               struct rte_eth_rss_conf *rss_conf)
2587 {
2588         struct rte_eth_dev *dev;
2589
2590         if (!rte_eth_dev_is_valid_port(port_id)) {
2591                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2592                 return (-ENODEV);
2593         }
2594
2595         dev = &rte_eth_devices[port_id];
2596         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
2597         return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
2598 }
2599
2600 int
2601 rte_eth_dev_udp_tunnel_add(uint8_t port_id,
2602                            struct rte_eth_udp_tunnel *udp_tunnel)
2603 {
2604         struct rte_eth_dev *dev;
2605
2606         if (!rte_eth_dev_is_valid_port(port_id)) {
2607                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2608                 return -ENODEV;
2609         }
2610
2611         if (udp_tunnel == NULL) {
2612                 PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2613                 return -EINVAL;
2614         }
2615
2616         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2617                 PMD_DEBUG_TRACE("Invalid tunnel type\n");
2618                 return -EINVAL;
2619         }
2620
2621         dev = &rte_eth_devices[port_id];
2622         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_add, -ENOTSUP);
2623         return (*dev->dev_ops->udp_tunnel_add)(dev, udp_tunnel);
2624 }
2625
2626 int
2627 rte_eth_dev_udp_tunnel_delete(uint8_t port_id,
2628                               struct rte_eth_udp_tunnel *udp_tunnel)
2629 {
2630         struct rte_eth_dev *dev;
2631
2632         if (!rte_eth_dev_is_valid_port(port_id)) {
2633                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2634                 return -ENODEV;
2635         }
2636
2637         dev = &rte_eth_devices[port_id];
2638
2639         if (udp_tunnel == NULL) {
2640                 PMD_DEBUG_TRACE("Invalid udp_tunnel parametr\n");
2641                 return -EINVAL;
2642         }
2643
2644         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2645                 PMD_DEBUG_TRACE("Invalid tunnel type\n");
2646                 return -EINVAL;
2647         }
2648
2649         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_del, -ENOTSUP);
2650         return (*dev->dev_ops->udp_tunnel_del)(dev, udp_tunnel);
2651 }
2652
2653 int
2654 rte_eth_led_on(uint8_t port_id)
2655 {
2656         struct rte_eth_dev *dev;
2657
2658         if (!rte_eth_dev_is_valid_port(port_id)) {
2659                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2660                 return (-ENODEV);
2661         }
2662
2663         dev = &rte_eth_devices[port_id];
2664         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
2665         return ((*dev->dev_ops->dev_led_on)(dev));
2666 }
2667
2668 int
2669 rte_eth_led_off(uint8_t port_id)
2670 {
2671         struct rte_eth_dev *dev;
2672
2673         if (!rte_eth_dev_is_valid_port(port_id)) {
2674                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2675                 return (-ENODEV);
2676         }
2677
2678         dev = &rte_eth_devices[port_id];
2679         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
2680         return ((*dev->dev_ops->dev_led_off)(dev));
2681 }
2682
2683 /*
2684  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2685  * an empty spot.
2686  */
2687 static inline int
2688 get_mac_addr_index(uint8_t port_id, struct ether_addr *addr)
2689 {
2690         struct rte_eth_dev_info dev_info;
2691         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2692         unsigned i;
2693
2694         rte_eth_dev_info_get(port_id, &dev_info);
2695
2696         for (i = 0; i < dev_info.max_mac_addrs; i++)
2697                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
2698                         return i;
2699
2700         return -1;
2701 }
2702
/* All-zero MAC address: marks an unused slot in the per-port address arrays. */
static struct ether_addr null_mac_addr = {{0, 0, 0, 0, 0, 0}};
2704
/*
 * Add a MAC address to a port and associate it with a VMDq pool.
 *
 * If the address already exists only the pool bitmap is extended; adding
 * an address/pool pair that is already present is a no-op returning 0.
 * Returns 0 on success, -ENODEV on a bad port, -EINVAL on a zero address
 * or out-of-range pool, -ENOSPC when the address array is full, -ENOTSUP
 * when the PMD has no mac_addr_add handler.
 */
int
rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
			uint32_t pool)
{
	struct rte_eth_dev *dev;
	int index;
	uint64_t pool_mask;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-ENODEV);
	}

	dev = &rte_eth_devices[port_id];
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);

	if (is_zero_ether_addr(addr)) {
		PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
			port_id);
		return (-EINVAL);
	}
	if (pool >= ETH_64_POOLS) {
		PMD_DEBUG_TRACE("pool id must be 0-%d\n",ETH_64_POOLS - 1);
		return (-EINVAL);
	}

	/* Reuse the slot when the address is known, else grab a free one. */
	index = get_mac_addr_index(port_id, addr);
	if (index < 0) {
		index = get_mac_addr_index(port_id, &null_mac_addr);
		if (index < 0) {
			PMD_DEBUG_TRACE("port %d: MAC address array full\n",
				port_id);
			return (-ENOSPC);
		}
	} else {
		pool_mask = dev->data->mac_pool_sel[index];

		/* Check if both MAC address and pool is already there, and do nothing */
		if (pool_mask & (1ULL << pool))
			return 0;
	}

	/* Update NIC */
	(*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);

	/* Update address in NIC data structure */
	ether_addr_copy(addr, &dev->data->mac_addrs[index]);

	/* Update pool bitmap in NIC data structure */
	dev->data->mac_pool_sel[index] |= (1ULL << pool);

	return 0;
}
2758
2759 int
2760 rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
2761 {
2762         struct rte_eth_dev *dev;
2763         int index;
2764
2765         if (!rte_eth_dev_is_valid_port(port_id)) {
2766                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2767                 return (-ENODEV);
2768         }
2769
2770         dev = &rte_eth_devices[port_id];
2771         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
2772
2773         index = get_mac_addr_index(port_id, addr);
2774         if (index == 0) {
2775                 PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
2776                 return (-EADDRINUSE);
2777         } else if (index < 0)
2778                 return 0;  /* Do nothing if address wasn't found */
2779
2780         /* Update NIC */
2781         (*dev->dev_ops->mac_addr_remove)(dev, index);
2782
2783         /* Update address in NIC data structure */
2784         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
2785
2786         /* reset pool bitmap */
2787         dev->data->mac_pool_sel[index] = 0;
2788
2789         return 0;
2790 }
2791
2792 int
2793 rte_eth_dev_set_vf_rxmode(uint8_t port_id,  uint16_t vf,
2794                                 uint16_t rx_mode, uint8_t on)
2795 {
2796         uint16_t num_vfs;
2797         struct rte_eth_dev *dev;
2798         struct rte_eth_dev_info dev_info;
2799
2800         if (!rte_eth_dev_is_valid_port(port_id)) {
2801                 PMD_DEBUG_TRACE("set VF RX mode:Invalid port_id=%d\n",
2802                                 port_id);
2803                 return (-ENODEV);
2804         }
2805
2806         dev = &rte_eth_devices[port_id];
2807         rte_eth_dev_info_get(port_id, &dev_info);
2808
2809         num_vfs = dev_info.max_vfs;
2810         if (vf > num_vfs)
2811         {
2812                 PMD_DEBUG_TRACE("set VF RX mode:invalid VF id %d\n", vf);
2813                 return (-EINVAL);
2814         }
2815         if (rx_mode == 0)
2816         {
2817                 PMD_DEBUG_TRACE("set VF RX mode:mode mask ca not be zero\n");
2818                 return (-EINVAL);
2819         }
2820         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP);
2821         return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on);
2822 }
2823
2824 /*
2825  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2826  * an empty spot.
2827  */
2828 static inline int
2829 get_hash_mac_addr_index(uint8_t port_id, struct ether_addr *addr)
2830 {
2831         struct rte_eth_dev_info dev_info;
2832         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2833         unsigned i;
2834
2835         rte_eth_dev_info_get(port_id, &dev_info);
2836         if (!dev->data->hash_mac_addrs)
2837                 return -1;
2838
2839         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
2840                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
2841                         ETHER_ADDR_LEN) == 0)
2842                         return i;
2843
2844         return -1;
2845 }
2846
2847 int
2848 rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
2849                                 uint8_t on)
2850 {
2851         int index;
2852         int ret;
2853         struct rte_eth_dev *dev;
2854
2855         if (!rte_eth_dev_is_valid_port(port_id)) {
2856                 PMD_DEBUG_TRACE("unicast hash setting:Invalid port_id=%d\n",
2857                         port_id);
2858                 return (-ENODEV);
2859         }
2860
2861         dev = &rte_eth_devices[port_id];
2862         if (is_zero_ether_addr(addr)) {
2863                 PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2864                         port_id);
2865                 return (-EINVAL);
2866         }
2867
2868         index = get_hash_mac_addr_index(port_id, addr);
2869         /* Check if it's already there, and do nothing */
2870         if ((index >= 0) && (on))
2871                 return 0;
2872
2873         if (index < 0) {
2874                 if (!on) {
2875                         PMD_DEBUG_TRACE("port %d: the MAC address was not"
2876                                 "set in UTA\n", port_id);
2877                         return (-EINVAL);
2878                 }
2879
2880                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
2881                 if (index < 0) {
2882                         PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2883                                         port_id);
2884                         return (-ENOSPC);
2885                 }
2886         }
2887
2888         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
2889         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
2890         if (ret == 0) {
2891                 /* Update address in NIC data structure */
2892                 if (on)
2893                         ether_addr_copy(addr,
2894                                         &dev->data->hash_mac_addrs[index]);
2895                 else
2896                         ether_addr_copy(&null_mac_addr,
2897                                         &dev->data->hash_mac_addrs[index]);
2898         }
2899
2900         return ret;
2901 }
2902
2903 int
2904 rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
2905 {
2906         struct rte_eth_dev *dev;
2907
2908         if (!rte_eth_dev_is_valid_port(port_id)) {
2909                 PMD_DEBUG_TRACE("unicast hash setting:Invalid port_id=%d\n",
2910                         port_id);
2911                 return (-ENODEV);
2912         }
2913
2914         dev = &rte_eth_devices[port_id];
2915
2916         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
2917         return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
2918 }
2919
2920 int
2921 rte_eth_dev_set_vf_rx(uint8_t port_id,uint16_t vf, uint8_t on)
2922 {
2923         uint16_t num_vfs;
2924         struct rte_eth_dev *dev;
2925         struct rte_eth_dev_info dev_info;
2926
2927         if (!rte_eth_dev_is_valid_port(port_id)) {
2928                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2929                 return (-ENODEV);
2930         }
2931
2932         dev = &rte_eth_devices[port_id];
2933         rte_eth_dev_info_get(port_id, &dev_info);
2934
2935         num_vfs = dev_info.max_vfs;
2936         if (vf > num_vfs)
2937         {
2938                 PMD_DEBUG_TRACE("port %d: invalid vf id\n", port_id);
2939                 return (-EINVAL);
2940         }
2941
2942         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP);
2943         return (*dev->dev_ops->set_vf_rx)(dev, vf,on);
2944 }
2945
2946 int
2947 rte_eth_dev_set_vf_tx(uint8_t port_id,uint16_t vf, uint8_t on)
2948 {
2949         uint16_t num_vfs;
2950         struct rte_eth_dev *dev;
2951         struct rte_eth_dev_info dev_info;
2952
2953         if (!rte_eth_dev_is_valid_port(port_id)) {
2954                 PMD_DEBUG_TRACE("set pool tx:Invalid port_id=%d\n", port_id);
2955                 return (-ENODEV);
2956         }
2957
2958         dev = &rte_eth_devices[port_id];
2959         rte_eth_dev_info_get(port_id, &dev_info);
2960
2961         num_vfs = dev_info.max_vfs;
2962         if (vf > num_vfs)
2963         {
2964                 PMD_DEBUG_TRACE("set pool tx:invalid pool id=%d\n", vf);
2965                 return (-EINVAL);
2966         }
2967
2968         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP);
2969         return (*dev->dev_ops->set_vf_tx)(dev, vf,on);
2970 }
2971
2972 int
2973 rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id,
2974                                  uint64_t vf_mask,uint8_t vlan_on)
2975 {
2976         struct rte_eth_dev *dev;
2977
2978         if (!rte_eth_dev_is_valid_port(port_id)) {
2979                 PMD_DEBUG_TRACE("VF VLAN filter:invalid port id=%d\n",
2980                                 port_id);
2981                 return (-ENODEV);
2982         }
2983         dev = &rte_eth_devices[port_id];
2984
2985         if(vlan_id > ETHER_MAX_VLAN_ID)
2986         {
2987                 PMD_DEBUG_TRACE("VF VLAN filter:invalid VLAN id=%d\n",
2988                         vlan_id);
2989                 return (-EINVAL);
2990         }
2991         if (vf_mask == 0)
2992         {
2993                 PMD_DEBUG_TRACE("VF VLAN filter:pool_mask can not be 0\n");
2994                 return (-EINVAL);
2995         }
2996
2997         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP);
2998         return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id,
2999                                                 vf_mask,vlan_on);
3000 }
3001
/*
 * Set a TX rate limit (same units as link_speed) on one queue of a port.
 * Returns 0 on success, -ENODEV/-EINVAL on validation failure, -ENOTSUP
 * when the PMD does not implement queue rate limiting.
 */
int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
					uint16_t tx_rate)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_link link;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		PMD_DEBUG_TRACE("set queue rate limit:invalid port id=%d\n",
				port_id);
		return -ENODEV;
	}

	dev = &rte_eth_devices[port_id];
	rte_eth_dev_info_get(port_id, &dev_info);
	/* Snapshot of the current link; used to bound the requested rate. */
	link = dev->data->dev_link;

	/* NOTE(review): '>' accepts queue_idx == max_tx_queues although
	 * valid ids are presumably 0..max_tx_queues-1 - confirm intent. */
	if (queue_idx > dev_info.max_tx_queues) {
		PMD_DEBUG_TRACE("set queue rate limit:port %d: "
				"invalid queue id=%d\n", port_id, queue_idx);
		return -EINVAL;
	}

	/* A rate above the negotiated link speed cannot be honoured. */
	if (tx_rate > link.link_speed) {
		PMD_DEBUG_TRACE("set queue rate limit:invalid tx_rate=%d, "
				"bigger than link speed= %d\n",
			tx_rate, link.link_speed);
		return -EINVAL;
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
	return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate);
}
3035
3036 int rte_eth_set_vf_rate_limit(uint8_t port_id, uint16_t vf, uint16_t tx_rate,
3037                                 uint64_t q_msk)
3038 {
3039         struct rte_eth_dev *dev;
3040         struct rte_eth_dev_info dev_info;
3041         struct rte_eth_link link;
3042
3043         if (q_msk == 0)
3044                 return 0;
3045
3046         if (!rte_eth_dev_is_valid_port(port_id)) {
3047                 PMD_DEBUG_TRACE("set VF rate limit:invalid port id=%d\n",
3048                                 port_id);
3049                 return -ENODEV;
3050         }
3051
3052         dev = &rte_eth_devices[port_id];
3053         rte_eth_dev_info_get(port_id, &dev_info);
3054         link = dev->data->dev_link;
3055
3056         if (vf > dev_info.max_vfs) {
3057                 PMD_DEBUG_TRACE("set VF rate limit:port %d: "
3058                                 "invalid vf id=%d\n", port_id, vf);
3059                 return -EINVAL;
3060         }
3061
3062         if (tx_rate > link.link_speed) {
3063                 PMD_DEBUG_TRACE("set VF rate limit:invalid tx_rate=%d, "
3064                                 "bigger than link speed= %d\n",
3065                                 tx_rate, link.link_speed);
3066                 return -EINVAL;
3067         }
3068
3069         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rate_limit, -ENOTSUP);
3070         return (*dev->dev_ops->set_vf_rate_limit)(dev, vf, tx_rate, q_msk);
3071 }
3072
3073 int
3074 rte_eth_mirror_rule_set(uint8_t port_id,
3075                         struct rte_eth_vmdq_mirror_conf *mirror_conf,
3076                         uint8_t rule_id, uint8_t on)
3077 {
3078         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3079
3080         if (!rte_eth_dev_is_valid_port(port_id)) {
3081                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3082                 return (-ENODEV);
3083         }
3084
3085         if (mirror_conf->rule_type_mask == 0) {
3086                 PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
3087                 return (-EINVAL);
3088         }
3089
3090         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
3091                 PMD_DEBUG_TRACE("Invalid dst pool, pool id must"
3092                         "be 0-%d\n",ETH_64_POOLS - 1);
3093                 return (-EINVAL);
3094         }
3095
3096         if ((mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) &&
3097                 (mirror_conf->pool_mask == 0)) {
3098                 PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not"
3099                                 "be 0.\n");
3100                 return (-EINVAL);
3101         }
3102
3103         if(rule_id >= ETH_VMDQ_NUM_MIRROR_RULE)
3104         {
3105                 PMD_DEBUG_TRACE("Invalid rule_id, rule_id must be 0-%d\n",
3106                         ETH_VMDQ_NUM_MIRROR_RULE - 1);
3107                 return (-EINVAL);
3108         }
3109
3110         dev = &rte_eth_devices[port_id];
3111         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
3112
3113         return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
3114 }
3115
3116 int
3117 rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
3118 {
3119         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3120
3121         if (!rte_eth_dev_is_valid_port(port_id)) {
3122                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3123                 return (-ENODEV);
3124         }
3125
3126         if(rule_id >= ETH_VMDQ_NUM_MIRROR_RULE)
3127         {
3128                 PMD_DEBUG_TRACE("Invalid rule_id, rule_id must be 0-%d\n",
3129                         ETH_VMDQ_NUM_MIRROR_RULE-1);
3130                 return (-EINVAL);
3131         }
3132
3133         dev = &rte_eth_devices[port_id];
3134         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
3135
3136         return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
3137 }
3138
3139 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3140 uint16_t
3141 rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
3142                  struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
3143 {
3144         struct rte_eth_dev *dev;
3145
3146         if (!rte_eth_dev_is_valid_port(port_id)) {
3147                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3148                 return 0;
3149         }
3150
3151         dev = &rte_eth_devices[port_id];
3152         FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);
3153         if (queue_id >= dev->data->nb_rx_queues) {
3154                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
3155                 return 0;
3156         }
3157         return (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
3158                                                 rx_pkts, nb_pkts);
3159 }
3160
3161 uint16_t
3162 rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
3163                  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
3164 {
3165         struct rte_eth_dev *dev;
3166
3167         if (!rte_eth_dev_is_valid_port(port_id)) {
3168                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3169                 return 0;
3170         }
3171
3172         dev = &rte_eth_devices[port_id];
3173
3174         FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);
3175         if (queue_id >= dev->data->nb_tx_queues) {
3176                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
3177                 return 0;
3178         }
3179         return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id],
3180                                                 tx_pkts, nb_pkts);
3181 }
3182
3183 uint32_t
3184 rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
3185 {
3186         struct rte_eth_dev *dev;
3187
3188         if (!rte_eth_dev_is_valid_port(port_id)) {
3189                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3190                 return 0;
3191         }
3192
3193         dev = &rte_eth_devices[port_id];
3194         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, 0);
3195         return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
3196 }
3197
3198 int
3199 rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
3200 {
3201         struct rte_eth_dev *dev;
3202
3203         if (!rte_eth_dev_is_valid_port(port_id)) {
3204                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3205                 return (-ENODEV);
3206         }
3207
3208         dev = &rte_eth_devices[port_id];
3209         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
3210         return (*dev->dev_ops->rx_descriptor_done)( \
3211                 dev->data->rx_queues[queue_id], offset);
3212 }
3213 #endif
3214
/*
 * Register a user callback for 'event' on a port.
 *
 * An identical (cb_fn, cb_arg, event) registration is reused rather than
 * duplicated.  Returns 0 on success, -EINVAL on a NULL cb_fn or invalid
 * port, -ENOMEM when the callback record cannot be allocated.
 */
int
rte_eth_dev_callback_register(uint8_t port_id,
			enum rte_eth_event_type event,
			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_callback *user_cb;

	if (!cb_fn)
		return (-EINVAL);

	if (!rte_eth_dev_is_valid_port(port_id)) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-EINVAL);
	}

	dev = &rte_eth_devices[port_id];
	rte_spinlock_lock(&rte_eth_dev_cb_lock);

	/* Look for an existing registration with the same fn/arg/event;
	 * user_cb is non-NULL after the loop if one was found. */
	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
		if (user_cb->cb_fn == cb_fn &&
			user_cb->cb_arg == cb_arg &&
			user_cb->event == event) {
			break;
		}
	}

	/* create a new callback. */
	if (user_cb == NULL && (user_cb = rte_zmalloc("INTR_USER_CALLBACK",
			sizeof(struct rte_eth_dev_callback), 0)) != NULL) {
		user_cb->cb_fn = cb_fn;
		user_cb->cb_arg = cb_arg;
		user_cb->event = event;
		TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
	}

	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
	/* user_cb is NULL here only when the allocation above failed. */
	return ((user_cb == NULL) ? -ENOMEM : 0);
}
3254
/*
 * Unregister callback(s) for 'event' on a port.
 *
 * A cb_arg of (void *)-1 acts as a wildcard matching any registered
 * argument for the given cb_fn/event pair.  Returns 0 on success,
 * -EINVAL on a NULL cb_fn or invalid port, -EAGAIN when at least one
 * matching callback is currently executing and could not be removed.
 */
int
rte_eth_dev_callback_unregister(uint8_t port_id,
			enum rte_eth_event_type event,
			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_callback *cb, *next;

	if (!cb_fn)
		return (-EINVAL);

	if (!rte_eth_dev_is_valid_port(port_id)) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-EINVAL);
	}

	dev = &rte_eth_devices[port_id];
	rte_spinlock_lock(&rte_eth_dev_cb_lock);

	ret = 0;
	/* Snapshot 'next' before any removal so the walk survives
	 * TAILQ_REMOVE() of the current element. */
	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb->cb_arg != (void *)-1 &&
				cb->cb_arg != cb_arg))
			continue;

		/*
		 * if this callback is not executing right now,
		 * then remove it.
		 */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
			rte_free(cb);
		} else {
			ret = -EAGAIN;
		}
	}

	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
	return (ret);
}
3300
/*
 * Invoke every callback registered for 'event' on 'dev'.
 *
 * Each callback runs on a private copy of its list entry with the list
 * lock released, so user code may (un)register callbacks itself; the
 * 'active' flag makes rte_eth_dev_callback_unregister() return -EAGAIN
 * instead of freeing an entry that is mid-execution.
 */
void
_rte_eth_dev_callback_process(struct rte_eth_dev *dev,
	enum rte_eth_event_type event)
{
	struct rte_eth_dev_callback *cb_lst;
	struct rte_eth_dev_callback dev_cb;

	rte_spinlock_lock(&rte_eth_dev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		/* Copy the entry, mark it busy, then drop the lock while
		 * running user code. */
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		rte_spinlock_unlock(&rte_eth_dev_cb_lock);
		dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
						dev_cb.cb_arg);
		rte_spinlock_lock(&rte_eth_dev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
}
3322 #ifdef RTE_NIC_BYPASS
3323 int rte_eth_dev_bypass_init(uint8_t port_id)
3324 {
3325         struct rte_eth_dev *dev;
3326
3327         if (!rte_eth_dev_is_valid_port(port_id)) {
3328                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3329                 return (-ENODEV);
3330         }
3331
3332         if ((dev= &rte_eth_devices[port_id]) == NULL) {
3333                 PMD_DEBUG_TRACE("Invalid port device\n");
3334                 return (-ENODEV);
3335         }
3336
3337         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP);
3338         (*dev->dev_ops->bypass_init)(dev);
3339         return 0;
3340 }
3341
3342 int
3343 rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state)
3344 {
3345         struct rte_eth_dev *dev;
3346
3347         if (!rte_eth_dev_is_valid_port(port_id)) {
3348                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3349                 return (-ENODEV);
3350         }
3351
3352         if ((dev= &rte_eth_devices[port_id]) == NULL) {
3353                 PMD_DEBUG_TRACE("Invalid port device\n");
3354                 return (-ENODEV);
3355         }
3356         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
3357         (*dev->dev_ops->bypass_state_show)(dev, state);
3358         return 0;
3359 }
3360
3361 int
3362 rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state)
3363 {
3364         struct rte_eth_dev *dev;
3365
3366         if (!rte_eth_dev_is_valid_port(port_id)) {
3367                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3368                 return (-ENODEV);
3369         }
3370
3371         if ((dev= &rte_eth_devices[port_id]) == NULL) {
3372                 PMD_DEBUG_TRACE("Invalid port device\n");
3373                 return (-ENODEV);
3374         }
3375
3376         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP);
3377         (*dev->dev_ops->bypass_state_set)(dev, new_state);
3378         return 0;
3379 }
3380
3381 int
3382 rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state)
3383 {
3384         struct rte_eth_dev *dev;
3385
3386         if (!rte_eth_dev_is_valid_port(port_id)) {
3387                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3388                 return (-ENODEV);
3389         }
3390
3391         if ((dev= &rte_eth_devices[port_id]) == NULL) {
3392                 PMD_DEBUG_TRACE("Invalid port device\n");
3393                 return (-ENODEV);
3394         }
3395
3396         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
3397         (*dev->dev_ops->bypass_event_show)(dev, event, state);
3398         return 0;
3399 }
3400
3401 int
3402 rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state)
3403 {
3404         struct rte_eth_dev *dev;
3405
3406         if (!rte_eth_dev_is_valid_port(port_id)) {
3407                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3408                 return (-ENODEV);
3409         }
3410
3411         if ((dev= &rte_eth_devices[port_id]) == NULL) {
3412                 PMD_DEBUG_TRACE("Invalid port device\n");
3413                 return (-ENODEV);
3414         }
3415
3416         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP);
3417         (*dev->dev_ops->bypass_event_set)(dev, event, state);
3418         return 0;
3419 }
3420
3421 int
3422 rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout)
3423 {
3424         struct rte_eth_dev *dev;
3425
3426         if (!rte_eth_dev_is_valid_port(port_id)) {
3427                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3428                 return (-ENODEV);
3429         }
3430
3431         if ((dev= &rte_eth_devices[port_id]) == NULL) {
3432                 PMD_DEBUG_TRACE("Invalid port device\n");
3433                 return (-ENODEV);
3434         }
3435
3436         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP);
3437         (*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout);
3438         return 0;
3439 }
3440
3441 int
3442 rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver)
3443 {
3444         struct rte_eth_dev *dev;
3445
3446         if (!rte_eth_dev_is_valid_port(port_id)) {
3447                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3448                 return (-ENODEV);
3449         }
3450
3451         if ((dev= &rte_eth_devices[port_id]) == NULL) {
3452                 PMD_DEBUG_TRACE("Invalid port device\n");
3453                 return (-ENODEV);
3454         }
3455
3456         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP);
3457         (*dev->dev_ops->bypass_ver_show)(dev, ver);
3458         return 0;
3459 }
3460
3461 int
3462 rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
3463 {
3464         struct rte_eth_dev *dev;
3465
3466         if (!rte_eth_dev_is_valid_port(port_id)) {
3467                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3468                 return (-ENODEV);
3469         }
3470
3471         if ((dev= &rte_eth_devices[port_id]) == NULL) {
3472                 PMD_DEBUG_TRACE("Invalid port device\n");
3473                 return (-ENODEV);
3474         }
3475
3476         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP);
3477         (*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout);
3478         return 0;
3479 }
3480
3481 int
3482 rte_eth_dev_bypass_wd_reset(uint8_t port_id)
3483 {
3484         struct rte_eth_dev *dev;
3485
3486         if (!rte_eth_dev_is_valid_port(port_id)) {
3487                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3488                 return (-ENODEV);
3489         }
3490
3491         if ((dev= &rte_eth_devices[port_id]) == NULL) {
3492                 PMD_DEBUG_TRACE("Invalid port device\n");
3493                 return (-ENODEV);
3494         }
3495
3496         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP);
3497         (*dev->dev_ops->bypass_wd_reset)(dev);
3498         return 0;
3499 }
3500 #endif
3501
3502 int
3503 rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type)
3504 {
3505         struct rte_eth_dev *dev;
3506
3507         if (!rte_eth_dev_is_valid_port(port_id)) {
3508                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3509                 return -ENODEV;
3510         }
3511
3512         dev = &rte_eth_devices[port_id];
3513         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3514         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3515                                 RTE_ETH_FILTER_NOP, NULL);
3516 }
3517
3518 int
3519 rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
3520                        enum rte_filter_op filter_op, void *arg)
3521 {
3522         struct rte_eth_dev *dev;
3523
3524         if (!rte_eth_dev_is_valid_port(port_id)) {
3525                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3526                 return -ENODEV;
3527         }
3528
3529         dev = &rte_eth_devices[port_id];
3530         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3531         return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op, arg);
3532 }
3533
3534 void *
3535 rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
3536                 rte_rxtx_callback_fn fn, void *user_param)
3537 {
3538 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3539         rte_errno = ENOTSUP;
3540         return NULL;
3541 #endif
3542         /* check input parameters */
3543         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3544                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3545                 rte_errno = EINVAL;
3546                 return NULL;
3547         }
3548
3549         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3550
3551         if (cb == NULL) {
3552                 rte_errno = ENOMEM;
3553                 return NULL;
3554         }
3555
3556         cb->fn = fn;
3557         cb->param = user_param;
3558         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3559         rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3560         return cb;
3561 }
3562
3563 void *
3564 rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
3565                 rte_rxtx_callback_fn fn, void *user_param)
3566 {
3567 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3568         rte_errno = ENOTSUP;
3569         return NULL;
3570 #endif
3571         /* check input parameters */
3572         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3573                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
3574                 rte_errno = EINVAL;
3575                 return NULL;
3576         }
3577
3578         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3579
3580         if (cb == NULL) {
3581                 rte_errno = ENOMEM;
3582                 return NULL;
3583         }
3584
3585         cb->fn = fn;
3586         cb->param = user_param;
3587         cb->next = rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
3588         rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
3589         return cb;
3590 }
3591
3592 int
3593 rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
3594                 struct rte_eth_rxtx_callback *user_cb)
3595 {
3596 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3597         return (-ENOTSUP);
3598 #endif
3599         /* Check input parameters. */
3600         if (!rte_eth_dev_is_valid_port(port_id) || user_cb == NULL ||
3601                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3602                 return (-EINVAL);
3603         }
3604
3605         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3606         struct rte_eth_rxtx_callback *cb = dev->post_rx_burst_cbs[queue_id];
3607         struct rte_eth_rxtx_callback *prev_cb;
3608
3609         /* Reset head pointer and remove user cb if first in the list. */
3610         if (cb == user_cb) {
3611                 dev->post_rx_burst_cbs[queue_id] = user_cb->next;
3612                 return 0;
3613         }
3614
3615         /* Remove the user cb from the callback list. */
3616         do {
3617                 prev_cb = cb;
3618                 cb = cb->next;
3619
3620                 if (cb == user_cb) {
3621                         prev_cb->next = user_cb->next;
3622                         return 0;
3623                 }
3624
3625         } while (cb != NULL);
3626
3627         /* Callback wasn't found. */
3628         return (-EINVAL);
3629 }
3630
3631 int
3632 rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
3633                 struct rte_eth_rxtx_callback *user_cb)
3634 {
3635 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3636         return (-ENOTSUP);
3637 #endif
3638         /* Check input parameters. */
3639         if (!rte_eth_dev_is_valid_port(port_id) || user_cb == NULL ||
3640                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
3641                 return (-EINVAL);
3642         }
3643
3644         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3645         struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];
3646         struct rte_eth_rxtx_callback *prev_cb;
3647
3648         /* Reset head pointer and remove user cb if first in the list. */
3649         if (cb == user_cb) {
3650                 dev->pre_tx_burst_cbs[queue_id] = user_cb->next;
3651                 return 0;
3652         }
3653
3654         /* Remove the user cb from the callback list. */
3655         do {
3656                 prev_cb = cb;
3657                 cb = cb->next;
3658
3659                 if (cb == user_cb) {
3660                         prev_cb->next = user_cb->next;
3661                         return 0;
3662                 }
3663
3664         } while (cb != NULL);
3665
3666         /* Callback wasn't found. */
3667         return (-EINVAL);
3668 }