833e2987ec8c825ade01ea85ed15a2321b6db8b6
[dpdk.git] / lib / librte_ether / rte_ethdev.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/types.h>
35 #include <sys/queue.h>
36 #include <ctype.h>
37 #include <stdio.h>
38 #include <stdlib.h>
39 #include <string.h>
40 #include <stdarg.h>
41 #include <errno.h>
42 #include <stdint.h>
43 #include <inttypes.h>
44 #include <netinet/in.h>
45
46 #include <rte_byteorder.h>
47 #include <rte_log.h>
48 #include <rte_debug.h>
49 #include <rte_interrupts.h>
50 #include <rte_pci.h>
51 #include <rte_memory.h>
52 #include <rte_memcpy.h>
53 #include <rte_memzone.h>
54 #include <rte_launch.h>
55 #include <rte_tailq.h>
56 #include <rte_eal.h>
57 #include <rte_per_lcore.h>
58 #include <rte_lcore.h>
59 #include <rte_atomic.h>
60 #include <rte_branch_prediction.h>
61 #include <rte_common.h>
62 #include <rte_ring.h>
63 #include <rte_mempool.h>
64 #include <rte_malloc.h>
65 #include <rte_mbuf.h>
66 #include <rte_errno.h>
67 #include <rte_spinlock.h>
68 #include <rte_string_fns.h>
69
70 #include "rte_ether.h"
71 #include "rte_ethdev.h"
72
73 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
74 #define PMD_DEBUG_TRACE(fmt, args...) do {                        \
75                 RTE_LOG(ERR, PMD, "%s: " fmt, __func__, ## args); \
76         } while (0)
77 #else
78 #define PMD_DEBUG_TRACE(fmt, args...)
79 #endif
80
81 /* Macros for checking for restricting functions to primary instance only */
82 #define PROC_PRIMARY_OR_ERR_RET(retval) do { \
83         if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
84                 PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
85                 return (retval); \
86         } \
87 } while(0)
88 #define PROC_PRIMARY_OR_RET() do { \
89         if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
90                 PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
91                 return; \
92         } \
93 } while(0)
94
/* Macros to check for invalid function pointers in dev_ops structure */
96 #define FUNC_PTR_OR_ERR_RET(func, retval) do { \
97         if ((func) == NULL) { \
98                 PMD_DEBUG_TRACE("Function not supported\n"); \
99                 return (retval); \
100         } \
101 } while(0)
102 #define FUNC_PTR_OR_RET(func) do { \
103         if ((func) == NULL) { \
104                 PMD_DEBUG_TRACE("Function not supported\n"); \
105                 return; \
106         } \
107 } while(0)
108
/* Name of the memzone holding the shared rte_eth_dev_data[] array */
static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
/* Per-port device structures, indexed by port id (process-local) */
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
/* Points into the shared memzone; NULL until rte_eth_dev_data_alloc() runs */
static struct rte_eth_dev_data *rte_eth_dev_data = NULL;
/* Count of currently attached ports */
static uint8_t nb_ports = 0;

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
116
/* Maps a human-readable statistic name to its byte offset within
 * struct rte_eth_stats, so extended stats can be read generically. */
struct rte_eth_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};
122
/* Device-level statistics: name -> offset into struct rte_eth_stats */
static struct rte_eth_xstats_name_off rte_stats_strings[] = {
         {"rx_packets", offsetof(struct rte_eth_stats, ipackets)},
         {"tx_packets", offsetof(struct rte_eth_stats, opackets)},
         {"rx_bytes", offsetof(struct rte_eth_stats, ibytes)},
         {"tx_bytes", offsetof(struct rte_eth_stats, obytes)},
         {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
         {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
         {"rx_crc_errors", offsetof(struct rte_eth_stats, ibadcrc)},
         {"rx_bad_length_errors", offsetof(struct rte_eth_stats, ibadlen)},
         {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
         {"alloc_rx_buff_failed", offsetof(struct rte_eth_stats, rx_nombuf)},
         {"fdir_match", offsetof(struct rte_eth_stats, fdirmatch)},
         {"fdir_miss", offsetof(struct rte_eth_stats, fdirmiss)},
         {"tx_flow_control_xon", offsetof(struct rte_eth_stats, tx_pause_xon)},
         {"rx_flow_control_xon", offsetof(struct rte_eth_stats, rx_pause_xon)},
         {"tx_flow_control_xoff", offsetof(struct rte_eth_stats, tx_pause_xoff)},
         {"rx_flow_control_xoff", offsetof(struct rte_eth_stats, rx_pause_xoff)},
};
/* Number of device-level statistic entries above */
#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))
142
/* Per-RX-queue statistics: name -> offset of the per-queue array */
static struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
        {"rx_packets", offsetof(struct rte_eth_stats, q_ipackets)},
        {"rx_bytes", offsetof(struct rte_eth_stats, q_ibytes)},
};
#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
                sizeof(rte_rxq_stats_strings[0]))
149
/* Per-TX-queue statistics: name -> offset of the per-queue array */
static struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
        {"tx_packets", offsetof(struct rte_eth_stats, q_opackets)},
        {"tx_bytes", offsetof(struct rte_eth_stats, q_obytes)},
        {"tx_errors", offsetof(struct rte_eth_stats, q_errors)},
};
#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
                sizeof(rte_txq_stats_strings[0]))
157
158
/**
 * The user application callback description.
 *
 * It contains the callback address registered by the application,
 * the opaque parameter passed back to the callback, the interrupt
 * event it is bound to, and an 'active' flag used to guard against
 * unregistering a callback while it is executing.
 */
struct rte_eth_dev_callback {
        TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
        rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
        void *cb_arg;                           /**< Parameter for callback */
        enum rte_eth_event_type event;          /**< Interrupt event type */
        uint32_t active;                        /**< Callback is executing */
};
172
/* Direction selector for queue -> stats-counter mapping */
enum {
        STAT_QMAP_TX = 0,
        STAT_QMAP_RX
};

/* Values for the rte_eth_dev 'attached' field */
enum {
        DEV_DETACHED = 0,
        DEV_ATTACHED
};
182
183 static inline void
184 rte_eth_dev_data_alloc(void)
185 {
186         const unsigned flags = 0;
187         const struct rte_memzone *mz;
188
189         if (rte_eal_process_type() == RTE_PROC_PRIMARY){
190                 mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
191                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
192                                 rte_socket_id(), flags);
193         } else
194                 mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
195         if (mz == NULL)
196                 rte_panic("Cannot allocate memzone for ethernet port data\n");
197
198         rte_eth_dev_data = mz->addr;
199         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
200                 memset(rte_eth_dev_data, 0,
201                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
202 }
203
204 struct rte_eth_dev *
205 rte_eth_dev_allocated(const char *name)
206 {
207         unsigned i;
208
209         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
210                 if ((rte_eth_devices[i].attached == DEV_ATTACHED) &&
211                     strcmp(rte_eth_devices[i].data->name, name) == 0)
212                         return &rte_eth_devices[i];
213         }
214         return NULL;
215 }
216
217 static uint8_t
218 rte_eth_dev_find_free_port(void)
219 {
220         unsigned i;
221
222         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
223                 if (rte_eth_devices[i].attached == DEV_DETACHED)
224                         return i;
225         }
226         return RTE_MAX_ETHPORTS;
227 }
228
229 struct rte_eth_dev *
230 rte_eth_dev_allocate(const char *name, enum rte_eth_dev_type type)
231 {
232         uint8_t port_id;
233         struct rte_eth_dev *eth_dev;
234
235         port_id = rte_eth_dev_find_free_port();
236         if (port_id == RTE_MAX_ETHPORTS) {
237                 PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
238                 return NULL;
239         }
240
241         if (rte_eth_dev_data == NULL)
242                 rte_eth_dev_data_alloc();
243
244         if (rte_eth_dev_allocated(name) != NULL) {
245                 PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n", name);
246                 return NULL;
247         }
248
249         eth_dev = &rte_eth_devices[port_id];
250         eth_dev->data = &rte_eth_dev_data[port_id];
251         snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
252         eth_dev->data->port_id = port_id;
253         eth_dev->attached = DEV_ATTACHED;
254         eth_dev->dev_type = type;
255         nb_ports++;
256         return eth_dev;
257 }
258
259 static inline int
260 rte_eth_dev_create_unique_device_name(char *name, size_t size,
261                 struct rte_pci_device *pci_dev)
262 {
263         int ret;
264
265         if ((name == NULL) || (pci_dev == NULL))
266                 return -EINVAL;
267
268         ret = snprintf(name, size, "%d:%d.%d",
269                         pci_dev->addr.bus, pci_dev->addr.devid,
270                         pci_dev->addr.function);
271         if (ret < 0)
272                 return ret;
273         return 0;
274 }
275
276 int
277 rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
278 {
279         if (eth_dev == NULL)
280                 return -EINVAL;
281
282         eth_dev->attached = 0;
283         nb_ports--;
284         return 0;
285 }
286
287 static int
288 rte_eth_dev_init(struct rte_pci_driver *pci_drv,
289                  struct rte_pci_device *pci_dev)
290 {
291         struct eth_driver    *eth_drv;
292         struct rte_eth_dev *eth_dev;
293         char ethdev_name[RTE_ETH_NAME_MAX_LEN];
294
295         int diag;
296
297         eth_drv = (struct eth_driver *)pci_drv;
298
299         /* Create unique Ethernet device name using PCI address */
300         rte_eth_dev_create_unique_device_name(ethdev_name,
301                         sizeof(ethdev_name), pci_dev);
302
303         eth_dev = rte_eth_dev_allocate(ethdev_name, RTE_ETH_DEV_PCI);
304         if (eth_dev == NULL)
305                 return -ENOMEM;
306
307         if (rte_eal_process_type() == RTE_PROC_PRIMARY){
308                 eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
309                                   eth_drv->dev_private_size,
310                                   RTE_CACHE_LINE_SIZE);
311                 if (eth_dev->data->dev_private == NULL)
312                         rte_panic("Cannot allocate memzone for private port data\n");
313         }
314         eth_dev->pci_dev = pci_dev;
315         eth_dev->driver = eth_drv;
316         eth_dev->data->rx_mbuf_alloc_failed = 0;
317
318         /* init user callbacks */
319         TAILQ_INIT(&(eth_dev->link_intr_cbs));
320
321         /*
322          * Set the default MTU.
323          */
324         eth_dev->data->mtu = ETHER_MTU;
325
326         /* Invoke PMD device initialization function */
327         diag = (*eth_drv->eth_dev_init)(eth_dev);
328         if (diag == 0)
329                 return (0);
330
331         PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%u device_id=0x%x)"
332                         " failed\n", pci_drv->name,
333                         (unsigned) pci_dev->id.vendor_id,
334                         (unsigned) pci_dev->id.device_id);
335         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
336                 rte_free(eth_dev->data->dev_private);
337         eth_dev->attached = DEV_DETACHED;
338         nb_ports--;
339         return diag;
340 }
341
/*
 * Generic PCI-remove hook for Ethernet PMDs: invokes the driver's
 * optional uninit callback, then releases the port slot, the private
 * data (primary process only), and clears the device structure.
 * Returns 0 on success, -EINVAL/-ENODEV on lookup failure, or the
 * non-zero value from the PMD uninit callback (teardown then stops).
 */
static int
rte_eth_dev_uninit(struct rte_pci_device *pci_dev)
{
	const struct eth_driver *eth_drv;
	struct rte_eth_dev *eth_dev;
	char ethdev_name[RTE_ETH_NAME_MAX_LEN];
	int ret;

	if (pci_dev == NULL)
		return -EINVAL;

	/* Create unique Ethernet device name using PCI address */
	rte_eth_dev_create_unique_device_name(ethdev_name,
			sizeof(ethdev_name), pci_dev);

	eth_dev = rte_eth_dev_allocated(ethdev_name);
	if (eth_dev == NULL)
		return -ENODEV;

	eth_drv = (const struct eth_driver *)pci_dev->driver;

	/* Invoke PMD device uninit function (optional callback) */
	if (*eth_drv->eth_dev_uninit) {
		ret = (*eth_drv->eth_dev_uninit)(eth_dev);
		if (ret)
			return ret;
	}

	/* free ether device (marks the slot detached) */
	rte_eth_dev_release_port(eth_dev);

	/* Private data was allocated by the primary process in
	 * rte_eth_dev_init(), so only the primary frees it. */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);

	/* Clear references so a stale pointer cannot be reused. */
	eth_dev->pci_dev = NULL;
	eth_dev->driver = NULL;
	eth_dev->data = NULL;

	return 0;
}
382
383 /**
384  * Register an Ethernet [Poll Mode] driver.
385  *
386  * Function invoked by the initialization function of an Ethernet driver
387  * to simultaneously register itself as a PCI driver and as an Ethernet
388  * Poll Mode Driver.
389  * Invokes the rte_eal_pci_register() function to register the *pci_drv*
390  * structure embedded in the *eth_drv* structure, after having stored the
391  * address of the rte_eth_dev_init() function in the *devinit* field of
392  * the *pci_drv* structure.
393  * During the PCI probing phase, the rte_eth_dev_init() function is
394  * invoked for each PCI [Ethernet device] matching the embedded PCI
395  * identifiers provided by the driver.
396  */
void
rte_eth_driver_register(struct eth_driver *eth_drv)
{
	/* Hook the ethdev-generic init/uninit wrappers into the PCI
	 * probe machinery, then register as an ordinary PCI driver. */
	eth_drv->pci_drv.devinit = rte_eth_dev_init;
	eth_drv->pci_drv.devuninit = rte_eth_dev_uninit;
	rte_eal_pci_register(&eth_drv->pci_drv);
}
404
405 static int
406 rte_eth_dev_is_valid_port(uint8_t port_id)
407 {
408         if (port_id >= RTE_MAX_ETHPORTS ||
409             rte_eth_devices[port_id].attached != DEV_ATTACHED)
410                 return 0;
411         else
412                 return 1;
413 }
414
415 int
416 rte_eth_dev_socket_id(uint8_t port_id)
417 {
418         if (!rte_eth_dev_is_valid_port(port_id))
419                 return -1;
420         return rte_eth_devices[port_id].pci_dev->numa_node;
421 }
422
423 uint8_t
424 rte_eth_dev_count(void)
425 {
426         return (nb_ports);
427 }
428
429 /* So far, DPDK hotplug function only supports linux */
430 #ifdef RTE_LIBRTE_EAL_HOTPLUG
431
432 static enum rte_eth_dev_type
433 rte_eth_dev_get_device_type(uint8_t port_id)
434 {
435         if (!rte_eth_dev_is_valid_port(port_id))
436                 return RTE_ETH_DEV_UNKNOWN;
437         return rte_eth_devices[port_id].dev_type;
438 }
439
440 static int
441 rte_eth_dev_save(struct rte_eth_dev *devs, size_t size)
442 {
443         if ((devs == NULL) ||
444             (size != sizeof(struct rte_eth_dev) * RTE_MAX_ETHPORTS))
445                 return -EINVAL;
446
447         /* save current rte_eth_devices */
448         memcpy(devs, rte_eth_devices, size);
449         return 0;
450 }
451
452 static int
453 rte_eth_dev_get_changed_port(struct rte_eth_dev *devs, uint8_t *port_id)
454 {
455         if ((devs == NULL) || (port_id == NULL))
456                 return -EINVAL;
457
458         /* check which port was attached or detached */
459         for (*port_id = 0; *port_id < RTE_MAX_ETHPORTS; (*port_id)++, devs++) {
460                 if (rte_eth_devices[*port_id].attached ^ devs->attached)
461                         return 0;
462         }
463         return -ENODEV;
464 }
465
466 static int
467 rte_eth_dev_get_addr_by_port(uint8_t port_id, struct rte_pci_addr *addr)
468 {
469         if (!rte_eth_dev_is_valid_port(port_id)) {
470                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
471                 return -EINVAL;
472         }
473
474         if (addr == NULL) {
475                 PMD_DEBUG_TRACE("Null pointer is specified\n");
476                 return -EINVAL;
477         }
478
479         *addr = rte_eth_devices[port_id].pci_dev->addr;
480         return 0;
481 }
482
483 static int
484 rte_eth_dev_get_name_by_port(uint8_t port_id, char *name)
485 {
486         char *tmp;
487
488         if (!rte_eth_dev_is_valid_port(port_id)) {
489                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
490                 return -EINVAL;
491         }
492
493         if (name == NULL) {
494                 PMD_DEBUG_TRACE("Null pointer is specified\n");
495                 return -EINVAL;
496         }
497
498         /* shouldn't check 'rte_eth_devices[i].data',
499          * because it might be overwritten by VDEV PMD */
500         tmp = rte_eth_dev_data[port_id].name;
501         strcpy(name, tmp);
502         return 0;
503 }
504
/*
 * Check whether 'port_id' supports hotplug detach.
 * Returns 0 when detachable; otherwise:
 *  -EINVAL  port_id out of range
 *  -ENOTSUP a PCI device bound to a kernel driver (e.g. VFIO) that
 *           this hotplug path does not support
 *  1        the PMD does not set RTE_PCI_DRV_DETACHABLE
 */
static int
rte_eth_dev_is_detachable(uint8_t port_id)
{
	uint32_t drv_flags;

	if (port_id >= RTE_MAX_ETHPORTS) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return -EINVAL;
	}

	/* Only UIO-style kernel bindings are detach-safe here. */
	if (rte_eth_devices[port_id].dev_type == RTE_ETH_DEV_PCI) {
		switch (rte_eth_devices[port_id].pci_dev->pt_driver) {
		case RTE_PT_IGB_UIO:
		case RTE_PT_UIO_GENERIC:
			break;
		case RTE_PT_VFIO:
		default:
			return -ENOTSUP;
		}
	}

	drv_flags = rte_eth_devices[port_id].driver->pci_drv.drv_flags;
	return !(drv_flags & RTE_PCI_DRV_DETACHABLE);
}
529
/* attach the new physical device, then store port_id of the device.
 * Returns 0 on success, -1 on any failure (logged). The new port is
 * found by diffing the attached flags against a pre-attach snapshot. */
static int
rte_eth_dev_attach_pdev(struct rte_pci_addr *addr, uint8_t *port_id)
{
	uint8_t new_port_id;
	struct rte_eth_dev devs[RTE_MAX_ETHPORTS];

	if ((addr == NULL) || (port_id == NULL))
		goto err;

	/* save current port status */
	if (rte_eth_dev_save(devs, sizeof(devs)))
		goto err;
	/* re-construct pci_device_list */
	if (rte_eal_pci_scan())
		goto err;
	/* invoke probe func of the driver can handle the new device.
	 * TODO:
	 * rte_eal_pci_probe_one() should return port_id.
	 * And rte_eth_dev_save() and rte_eth_dev_get_changed_port()
	 * should be removed. */
	if (rte_eal_pci_probe_one(addr))
		goto err;
	/* get port_id enabled by above procedures */
	if (rte_eth_dev_get_changed_port(devs, &new_port_id))
		goto err;

	*port_id = new_port_id;
	return 0;
err:
	RTE_LOG(ERR, EAL, "Driver, cannot attach the device\n");
	return -1;
}
563
/* detach the new physical device, then store pci_addr of the device.
 * Returns 0 on success, -1 on any failure (logged). */
static int
rte_eth_dev_detach_pdev(uint8_t port_id, struct rte_pci_addr *addr)
{
	struct rte_pci_addr freed_addr;
	struct rte_pci_addr vp;

	if (addr == NULL)
		goto err;

	/* check whether the driver supports detach feature, or not */
	if (rte_eth_dev_is_detachable(port_id))
		goto err;

	/* get pci address by port id */
	if (rte_eth_dev_get_addr_by_port(port_id, &freed_addr))
		goto err;

	/* Zeroed pci addr means the port comes from a virtual device,
	 * which cannot be detached through the physical path. */
	vp.domain = vp.bus = vp.devid = vp.function = 0;
	if (rte_eal_compare_pci_addr(&vp, &freed_addr) == 0)
		goto err;

	/* invoke close func of the driver,
	 * also remove the device from pci_device_list */
	if (rte_eal_pci_close_one(&freed_addr))
		goto err;

	*addr = freed_addr;
	return 0;
err:
	RTE_LOG(ERR, EAL, "Driver, cannot detach the device\n");
	return -1;
}
598
599 /* attach the new virtual device, then store port_id of the device */
600 static int
601 rte_eth_dev_attach_vdev(const char *vdevargs, uint8_t *port_id)
602 {
603         char *name = NULL, *args = NULL;
604         uint8_t new_port_id;
605         struct rte_eth_dev devs[RTE_MAX_ETHPORTS];
606         int ret = -1;
607
608         if ((vdevargs == NULL) || (port_id == NULL))
609                 goto end;
610
611         /* parse vdevargs, then retrieve device name and args */
612         if (rte_eal_parse_devargs_str(vdevargs, &name, &args))
613                 goto end;
614
615         /* save current port status */
616         if (rte_eth_dev_save(devs, sizeof(devs)))
617                 goto end;
618         /* walk around dev_driver_list to find the driver of the device,
619          * then invoke probe function o the driver.
620          * TODO:
621          * rte_eal_vdev_init() should return port_id,
622          * And rte_eth_dev_save() and rte_eth_dev_get_changed_port()
623          * should be removed. */
624         if (rte_eal_vdev_init(name, args))
625                 goto end;
626         /* get port_id enabled by above procedures */
627         if (rte_eth_dev_get_changed_port(devs, &new_port_id))
628                 goto end;
629         ret = 0;
630         *port_id = new_port_id;
631 end:
632         if (name)
633                 free(name);
634         if (args)
635                 free(args);
636
637         if (ret < 0)
638                 RTE_LOG(ERR, EAL, "Driver, cannot attach the device\n");
639         return ret;
640 }
641
642 /* detach the new virtual device, then store the name of the device */
643 static int
644 rte_eth_dev_detach_vdev(uint8_t port_id, char *vdevname)
645 {
646         char name[RTE_ETH_NAME_MAX_LEN];
647
648         if (vdevname == NULL)
649                 goto err;
650
651         /* check whether the driver supports detach feature, or not */
652         if (rte_eth_dev_is_detachable(port_id))
653                 goto err;
654
655         /* get device name by port id */
656         if (rte_eth_dev_get_name_by_port(port_id, name))
657                 goto err;
658         /* walk around dev_driver_list to find the driver of the device,
659          * then invoke close function o the driver */
660         if (rte_eal_vdev_uninit(name))
661                 goto err;
662
663         strncpy(vdevname, name, sizeof(name));
664         return 0;
665 err:
666         RTE_LOG(ERR, EAL, "Driver, cannot detach the device\n");
667         return -1;
668 }
669
670 /* attach the new device, then store port_id of the device */
671 int
672 rte_eth_dev_attach(const char *devargs, uint8_t *port_id)
673 {
674         struct rte_pci_addr addr;
675
676         if ((devargs == NULL) || (port_id == NULL))
677                 return -EINVAL;
678
679         if (eal_parse_pci_DomBDF(devargs, &addr) == 0)
680                 return rte_eth_dev_attach_pdev(&addr, port_id);
681         else
682                 return rte_eth_dev_attach_vdev(devargs, port_id);
683 }
684
685 /* detach the device, then store the name of the device */
686 int
687 rte_eth_dev_detach(uint8_t port_id, char *name)
688 {
689         struct rte_pci_addr addr;
690         int ret;
691
692         if (name == NULL)
693                 return -EINVAL;
694
695         if (rte_eth_dev_get_device_type(port_id) == RTE_ETH_DEV_PCI) {
696                 ret = rte_eth_dev_get_addr_by_port(port_id, &addr);
697                 if (ret < 0)
698                         return ret;
699
700                 ret = rte_eth_dev_detach_pdev(port_id, &addr);
701                 if (ret == 0)
702                         snprintf(name, RTE_ETH_NAME_MAX_LEN,
703                                 "%04x:%02x:%02x.%d",
704                                 addr.domain, addr.bus,
705                                 addr.devid, addr.function);
706
707                 return ret;
708         } else
709                 return rte_eth_dev_detach_vdev(port_id, name);
710 }
711 #else /* RTE_LIBRTE_EAL_HOTPLUG */
/* Stub used when RTE_LIBRTE_EAL_HOTPLUG is disabled: always fails. */
int
rte_eth_dev_attach(const char *devargs __rte_unused,
			uint8_t *port_id __rte_unused)
{
	RTE_LOG(ERR, EAL, "Hotplug support isn't enabled\n");
	return -1;
}
719
/* Stub used when RTE_LIBRTE_EAL_HOTPLUG is disabled: always fails. */
int
rte_eth_dev_detach(uint8_t port_id __rte_unused,
			char *name __rte_unused)
{
	RTE_LOG(ERR, EAL, "Hotplug support isn't enabled\n");
	return -1;
}
728 #endif /* RTE_LIBRTE_EAL_HOTPLUG */
729
730 static int
731 rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
732 {
733         uint16_t old_nb_queues = dev->data->nb_rx_queues;
734         void **rxq;
735         unsigned i;
736
737         if (dev->data->rx_queues == NULL) { /* first time configuration */
738                 dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
739                                 sizeof(dev->data->rx_queues[0]) * nb_queues,
740                                 RTE_CACHE_LINE_SIZE);
741                 if (dev->data->rx_queues == NULL) {
742                         dev->data->nb_rx_queues = 0;
743                         return -(ENOMEM);
744                 }
745 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
746                 dev->post_rx_burst_cbs = rte_zmalloc(
747                         "ethdev->post_rx_burst_cbs",
748                         sizeof(*dev->post_rx_burst_cbs) * nb_queues,
749                         RTE_CACHE_LINE_SIZE);
750                 if (dev->post_rx_burst_cbs == NULL) {
751                         rte_free(dev->data->rx_queues);
752                         dev->data->rx_queues = NULL;
753                         dev->data->nb_rx_queues = 0;
754                         return -ENOMEM;
755                 }
756 #endif
757
758         } else { /* re-configure */
759                 FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
760
761                 rxq = dev->data->rx_queues;
762
763                 for (i = nb_queues; i < old_nb_queues; i++)
764                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
765                 rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
766                                 RTE_CACHE_LINE_SIZE);
767                 if (rxq == NULL)
768                         return -(ENOMEM);
769 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
770                 dev->post_rx_burst_cbs = rte_realloc(
771                         dev->post_rx_burst_cbs,
772                         sizeof(*dev->post_rx_burst_cbs) *
773                                 nb_queues, RTE_CACHE_LINE_SIZE);
774                 if (dev->post_rx_burst_cbs == NULL)
775                         return -ENOMEM;
776 #endif
777                 if (nb_queues > old_nb_queues) {
778                         uint16_t new_qs = nb_queues - old_nb_queues;
779                         memset(rxq + old_nb_queues, 0,
780                                 sizeof(rxq[0]) * new_qs);
781 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
782                         memset(dev->post_rx_burst_cbs + old_nb_queues, 0,
783                                 sizeof(dev->post_rx_burst_cbs[0]) * new_qs);
784 #endif
785                 }
786
787                 dev->data->rx_queues = rxq;
788
789         }
790         dev->data->nb_rx_queues = nb_queues;
791         return (0);
792 }
793
794 int
795 rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
796 {
797         struct rte_eth_dev *dev;
798
799         /* This function is only safe when called from the primary process
800          * in a multi-process setup*/
801         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
802
803         if (!rte_eth_dev_is_valid_port(port_id)) {
804                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
805                 return -EINVAL;
806         }
807
808         dev = &rte_eth_devices[port_id];
809         if (rx_queue_id >= dev->data->nb_rx_queues) {
810                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
811                 return -EINVAL;
812         }
813
814         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
815
816         return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
817
818 }
819
820 int
821 rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
822 {
823         struct rte_eth_dev *dev;
824
825         /* This function is only safe when called from the primary process
826          * in a multi-process setup*/
827         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
828
829         if (!rte_eth_dev_is_valid_port(port_id)) {
830                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
831                 return -EINVAL;
832         }
833
834         dev = &rte_eth_devices[port_id];
835         if (rx_queue_id >= dev->data->nb_rx_queues) {
836                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
837                 return -EINVAL;
838         }
839
840         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
841
842         return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
843
844 }
845
846 int
847 rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
848 {
849         struct rte_eth_dev *dev;
850
851         /* This function is only safe when called from the primary process
852          * in a multi-process setup*/
853         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
854
855         if (!rte_eth_dev_is_valid_port(port_id)) {
856                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
857                 return -EINVAL;
858         }
859
860         dev = &rte_eth_devices[port_id];
861         if (tx_queue_id >= dev->data->nb_tx_queues) {
862                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
863                 return -EINVAL;
864         }
865
866         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
867
868         return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
869
870 }
871
872 int
873 rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
874 {
875         struct rte_eth_dev *dev;
876
877         /* This function is only safe when called from the primary process
878          * in a multi-process setup*/
879         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
880
881         if (!rte_eth_dev_is_valid_port(port_id)) {
882                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
883                 return -EINVAL;
884         }
885
886         dev = &rte_eth_devices[port_id];
887         if (tx_queue_id >= dev->data->nb_tx_queues) {
888                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
889                 return -EINVAL;
890         }
891
892         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
893
894         return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
895
896 }
897
898 static int
899 rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
900 {
901         uint16_t old_nb_queues = dev->data->nb_tx_queues;
902         void **txq;
903         unsigned i;
904
905         if (dev->data->tx_queues == NULL) { /* first time configuration */
906                 dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
907                                 sizeof(dev->data->tx_queues[0]) * nb_queues,
908                                 RTE_CACHE_LINE_SIZE);
909                 if (dev->data->tx_queues == NULL) {
910                         dev->data->nb_tx_queues = 0;
911                         return -(ENOMEM);
912                 }
913 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
914                 dev->pre_tx_burst_cbs = rte_zmalloc(
915                         "ethdev->pre_tx_burst_cbs",
916                         sizeof(*dev->pre_tx_burst_cbs) * nb_queues,
917                         RTE_CACHE_LINE_SIZE);
918                 if (dev->pre_tx_burst_cbs == NULL) {
919                         rte_free(dev->data->tx_queues);
920                         dev->data->tx_queues = NULL;
921                         dev->data->nb_tx_queues = 0;
922                         return -ENOMEM;
923                 }
924 #endif
925
926         } else { /* re-configure */
927                 FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
928
929                 txq = dev->data->tx_queues;
930
931                 for (i = nb_queues; i < old_nb_queues; i++)
932                         (*dev->dev_ops->tx_queue_release)(txq[i]);
933                 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
934                                 RTE_CACHE_LINE_SIZE);
935                 if (txq == NULL)
936                         return -ENOMEM;
937 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
938                 dev->pre_tx_burst_cbs = rte_realloc(
939                         dev->pre_tx_burst_cbs,
940                         sizeof(*dev->pre_tx_burst_cbs) *
941                                 nb_queues, RTE_CACHE_LINE_SIZE);
942                 if (dev->pre_tx_burst_cbs == NULL)
943                         return -ENOMEM;
944 #endif
945                 if (nb_queues > old_nb_queues) {
946                         uint16_t new_qs = nb_queues - old_nb_queues;
947                         memset(txq + old_nb_queues, 0,
948                                 sizeof(txq[0]) * new_qs);
949 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
950                         memset(dev->pre_tx_burst_cbs + old_nb_queues, 0,
951                                 sizeof(dev->pre_tx_burst_cbs[0]) * new_qs);
952 #endif
953                 }
954
955                 dev->data->tx_queues = txq;
956
957         }
958         dev->data->nb_tx_queues = nb_queues;
959         return (0);
960 }
961
962 static int
963 rte_eth_dev_check_vf_rss_rxq_num(uint8_t port_id, uint16_t nb_rx_q)
964 {
965         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
966         switch (nb_rx_q) {
967         case 1:
968         case 2:
969                 RTE_ETH_DEV_SRIOV(dev).active =
970                         ETH_64_POOLS;
971                 break;
972         case 4:
973                 RTE_ETH_DEV_SRIOV(dev).active =
974                         ETH_32_POOLS;
975                 break;
976         default:
977                 return -EINVAL;
978         }
979
980         RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q;
981         RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
982                 dev->pci_dev->max_vfs * nb_rx_q;
983
984         return 0;
985 }
986
/*
 * Validate the requested Rx/Tx multi-queue modes and queue counts.
 *
 * With SRIOV active, DCB modes are rejected, plain RSS is promoted to
 * VMDQ+RSS, unspecified modes default to VMDQ-only, and queue counts are
 * capped at the per-pool limit.  Without SRIOV, VMDQ+DCB and DCB modes are
 * checked against their fixed queue counts and pool/TC constraints.
 * Note: this function may rewrite dev->data->dev_conf mq_mode fields.
 * Returns 0 on success, -EINVAL on an unsupported combination.
 */
static int
rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];

	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
		/* check multi-queue mode */
		if ((dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) ||
		    (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB_RSS) ||
		    (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB)) {
			/* SRIOV only works in VMDq enable mode */
			PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
					" SRIOV active, "
					"wrong VMDQ mq_mode rx %u tx %u\n",
					port_id,
					dev_conf->rxmode.mq_mode,
					dev_conf->txmode.mq_mode);
			return (-EINVAL);
		}

		switch (dev_conf->rxmode.mq_mode) {
		case ETH_MQ_RX_VMDQ_DCB:
		case ETH_MQ_RX_VMDQ_DCB_RSS:
			/* DCB/RSS VMDQ in SRIOV mode, not implement yet */
			PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
					" SRIOV active, "
					"unsupported VMDQ mq_mode rx %u\n",
					port_id, dev_conf->rxmode.mq_mode);
			return (-EINVAL);
		case ETH_MQ_RX_RSS:
			PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
					" SRIOV active, "
					"Rx mq mode is changed from:"
					"mq_mode %u into VMDQ mq_mode %u\n",
					port_id,
					dev_conf->rxmode.mq_mode,
					dev->data->dev_conf.rxmode.mq_mode);
			/* fallthrough: plain RSS is silently promoted to
			 * VMDQ+RSS below.  NOTE(review): the trace above runs
			 * before the mode is rewritten, so the second %u
			 * prints the previously stored mq_mode, not
			 * ETH_MQ_RX_VMDQ_RSS — confirm intended. */
		case ETH_MQ_RX_VMDQ_RSS:
			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
			if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
				if (rte_eth_dev_check_vf_rss_rxq_num(port_id, nb_rx_q) != 0) {
					PMD_DEBUG_TRACE("ethdev port_id=%d"
						" SRIOV active, invalid queue"
						" number for VMDQ RSS, allowed"
						" value are 1, 2 or 4\n",
						port_id);
					return -EINVAL;
				}
			break;
		default: /* ETH_MQ_RX_VMDQ_ONLY or ETH_MQ_RX_NONE */
			/* if nothing mq mode configure, use default scheme */
			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
			if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
				RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
			break;
		}

		switch (dev_conf->txmode.mq_mode) {
		case ETH_MQ_TX_VMDQ_DCB:
			/* DCB VMDQ in SRIOV mode, not implement yet */
			PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
					" SRIOV active, "
					"unsupported VMDQ mq_mode tx %u\n",
					port_id, dev_conf->txmode.mq_mode);
			return (-EINVAL);
		default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
			/* if nothing mq mode configure, use default scheme */
			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
			break;
		}

		/* check valid queue number */
		if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
		    (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
			PMD_DEBUG_TRACE("ethdev port_id=%d SRIOV active, "
				    "queue number must less equal to %d\n",
					port_id, RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
			return (-EINVAL);
		}
	} else {
		/* For vmdb+dcb mode check our configuration before we go further */
		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
			const struct rte_eth_vmdq_dcb_conf *conf;

			/* VMDQ+DCB requires exactly ETH_VMDQ_DCB_NUM_QUEUES
			 * Rx queues. */
			if (nb_rx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
				PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_rx_q "
						"!= %d\n",
						port_id, ETH_VMDQ_DCB_NUM_QUEUES);
				return (-EINVAL);
			}
			conf = &(dev_conf->rx_adv_conf.vmdq_dcb_conf);
			if (! (conf->nb_queue_pools == ETH_16_POOLS ||
			       conf->nb_queue_pools == ETH_32_POOLS)) {
				PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
						"nb_queue_pools must be %d or %d\n",
						port_id, ETH_16_POOLS, ETH_32_POOLS);
				return (-EINVAL);
			}
		}
		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
			const struct rte_eth_vmdq_dcb_tx_conf *conf;

			if (nb_tx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
				PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_tx_q "
						"!= %d\n",
						port_id, ETH_VMDQ_DCB_NUM_QUEUES);
				return (-EINVAL);
			}
			conf = &(dev_conf->tx_adv_conf.vmdq_dcb_tx_conf);
			if (! (conf->nb_queue_pools == ETH_16_POOLS ||
			       conf->nb_queue_pools == ETH_32_POOLS)) {
				PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
						"nb_queue_pools != %d or nb_queue_pools "
						"!= %d\n",
						port_id, ETH_16_POOLS, ETH_32_POOLS);
				return (-EINVAL);
			}
		}

		/* For DCB mode check our configuration before we go further */
		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
			const struct rte_eth_dcb_rx_conf *conf;

			/* DCB requires exactly ETH_DCB_NUM_QUEUES Rx queues
			 * and either 4 or 8 traffic classes. */
			if (nb_rx_q != ETH_DCB_NUM_QUEUES) {
				PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_rx_q "
						"!= %d\n",
						port_id, ETH_DCB_NUM_QUEUES);
				return (-EINVAL);
			}
			conf = &(dev_conf->rx_adv_conf.dcb_rx_conf);
			if (! (conf->nb_tcs == ETH_4_TCS ||
			       conf->nb_tcs == ETH_8_TCS)) {
				PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
						"nb_tcs != %d or nb_tcs "
						"!= %d\n",
						port_id, ETH_4_TCS, ETH_8_TCS);
				return (-EINVAL);
			}
		}

		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
			const struct rte_eth_dcb_tx_conf *conf;

			if (nb_tx_q != ETH_DCB_NUM_QUEUES) {
				PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_tx_q "
						"!= %d\n",
						port_id, ETH_DCB_NUM_QUEUES);
				return (-EINVAL);
			}
			conf = &(dev_conf->tx_adv_conf.dcb_tx_conf);
			if (! (conf->nb_tcs == ETH_4_TCS ||
			       conf->nb_tcs == ETH_8_TCS)) {
				PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
						"nb_tcs != %d or nb_tcs "
						"!= %d\n",
						port_id, ETH_4_TCS, ETH_8_TCS);
				return (-EINVAL);
			}
		}
	}
	return 0;
}
1150
/*
 * Configure an Ethernet device: validate queue counts against the driver's
 * reported limits, copy dev_conf into dev->data, validate link-interrupt
 * and jumbo-frame settings, check the multi-queue mode, then (re)allocate
 * the Rx/Tx queue arrays and invoke the driver's dev_configure op.
 * The device must be stopped.  On any failure after queue allocation the
 * queue arrays are rolled back to zero queues.
 */
int
rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	int diag;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup*/
	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

	if (!rte_eth_dev_is_valid_port(port_id)) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-EINVAL);
	}

	dev = &rte_eth_devices[port_id];

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	/* Reconfiguring a running device is not allowed. */
	if (dev->data->dev_started) {
		PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n", port_id);
		return (-EBUSY);
	}

	/*
	 * Check that the numbers of RX and TX queues are not greater
	 * than the maximum number of RX and TX queues supported by the
	 * configured device.
	 */
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);
	if (nb_rx_q > dev_info.max_rx_queues) {
		PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
				port_id, nb_rx_q, dev_info.max_rx_queues);
		return (-EINVAL);
	}
	if (nb_rx_q == 0) {
		PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_q == 0\n", port_id);
		return (-EINVAL);
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
				port_id, nb_tx_q, dev_info.max_tx_queues);
		return (-EINVAL);
	}
	if (nb_tx_q == 0) {
		PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_q == 0\n", port_id);
		return (-EINVAL);
	}

	/* Copy the dev_conf parameter into the dev structure */
	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

	/*
	 * If link state interrupt is enabled, check that the
	 * device supports it.
	 */
	if (dev_conf->intr_conf.lsc == 1) {
		const struct rte_pci_driver *pci_drv = &dev->driver->pci_drv;

		if (!(pci_drv->drv_flags & RTE_PCI_DRV_INTR_LSC)) {
			PMD_DEBUG_TRACE("driver %s does not support lsc\n",
					pci_drv->name);
			return (-EINVAL);
		}
	}

	/*
	 * If jumbo frames are enabled, check that the maximum RX packet
	 * length is supported by the configured device.
	 */
	if (dev_conf->rxmode.jumbo_frame == 1) {
		if (dev_conf->rxmode.max_rx_pkt_len >
		    dev_info.max_rx_pktlen) {
			PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
				" > max valid value %u\n",
				port_id,
				(unsigned)dev_conf->rxmode.max_rx_pkt_len,
				(unsigned)dev_info.max_rx_pktlen);
			return (-EINVAL);
		}
		else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
			PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
				" < min valid value %u\n",
				port_id,
				(unsigned)dev_conf->rxmode.max_rx_pkt_len,
				(unsigned)ETHER_MIN_LEN);
			return (-EINVAL);
		}
	} else {
		/* Without jumbo frames, silently clamp an out-of-range
		 * max_rx_pkt_len to the standard Ethernet maximum. */
		if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
			dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
			/* Use default value */
			dev->data->dev_conf.rxmode.max_rx_pkt_len =
							ETHER_MAX_LEN;
	}

	/* multipe queue mode checking */
	diag = rte_eth_dev_check_mq_mode(port_id, nb_rx_q, nb_tx_q, dev_conf);
	if (diag != 0) {
		PMD_DEBUG_TRACE("port%d rte_eth_dev_check_mq_mode = %d\n",
				port_id, diag);
		return diag;
	}

	/*
	 * Setup new number of RX/TX queues and reconfigure device.
	 */
	diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
				port_id, diag);
		return diag;
	}

	diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
				port_id, diag);
		/* Roll back the RX queues allocated above. */
		rte_eth_dev_rx_queue_config(dev, 0);
		return diag;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
				port_id, diag);
		/* Roll back both queue arrays on driver failure. */
		rte_eth_dev_rx_queue_config(dev, 0);
		rte_eth_dev_tx_queue_config(dev, 0);
		return diag;
	}

	return 0;
}
1289
/*
 * Re-apply per-port configuration to the hardware after a (re)start:
 * replays the stored MAC address table and the promiscuous/all-multicast
 * flags kept in dev->data.  Errors are traced but not propagated — this
 * is a best-effort restore.
 */
static void
rte_eth_dev_config_restore(uint8_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct ether_addr addr;
	uint16_t i;
	uint32_t pool = 0;

	dev = &rte_eth_devices[port_id];

	rte_eth_dev_info_get(port_id, &dev_info);

	/* With SRIOV, addresses are replayed into the default VMDQ pool. */
	if (RTE_ETH_DEV_SRIOV(dev).active)
		pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx;

	/* replay MAC address configuration */
	for (i = 0; i < dev_info.max_mac_addrs; i++) {
		addr = dev->data->mac_addrs[i];

		/* skip zero address */
		if (is_zero_ether_addr(&addr))
			continue;

		/* add address to the hardware */
		if  (*dev->dev_ops->mac_addr_add &&
			(dev->data->mac_pool_sel[i] & (1ULL << pool)))
			(*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool);
		else {
			PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
					port_id);
			/* exit the loop but not return an error */
			break;
		}
	}

	/* replay promiscuous configuration */
	if (rte_eth_promiscuous_get(port_id) == 1)
		rte_eth_promiscuous_enable(port_id);
	else if (rte_eth_promiscuous_get(port_id) == 0)
		rte_eth_promiscuous_disable(port_id);

	/* replay allmulticast configuration */
	if (rte_eth_allmulticast_get(port_id) == 1)
		rte_eth_allmulticast_enable(port_id);
	else if (rte_eth_allmulticast_get(port_id) == 0)
		rte_eth_allmulticast_disable(port_id);
}
1338
1339 int
1340 rte_eth_dev_start(uint8_t port_id)
1341 {
1342         struct rte_eth_dev *dev;
1343         int diag;
1344
1345         /* This function is only safe when called from the primary process
1346          * in a multi-process setup*/
1347         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
1348
1349         if (!rte_eth_dev_is_valid_port(port_id)) {
1350                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1351                 return (-EINVAL);
1352         }
1353
1354         dev = &rte_eth_devices[port_id];
1355
1356         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1357
1358         if (dev->data->dev_started != 0) {
1359                 PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
1360                         " already started\n",
1361                         port_id);
1362                 return (0);
1363         }
1364
1365         diag = (*dev->dev_ops->dev_start)(dev);
1366         if (diag == 0)
1367                 dev->data->dev_started = 1;
1368         else
1369                 return diag;
1370
1371         rte_eth_dev_config_restore(port_id);
1372
1373         if (dev->data->dev_conf.intr_conf.lsc != 0) {
1374                 FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
1375                 (*dev->dev_ops->link_update)(dev, 0);
1376         }
1377         return 0;
1378 }
1379
1380 void
1381 rte_eth_dev_stop(uint8_t port_id)
1382 {
1383         struct rte_eth_dev *dev;
1384
1385         /* This function is only safe when called from the primary process
1386          * in a multi-process setup*/
1387         PROC_PRIMARY_OR_RET();
1388
1389         if (!rte_eth_dev_is_valid_port(port_id)) {
1390                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1391                 return;
1392         }
1393
1394         dev = &rte_eth_devices[port_id];
1395
1396         FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1397
1398         if (dev->data->dev_started == 0) {
1399                 PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
1400                         " already stopped\n",
1401                         port_id);
1402                 return;
1403         }
1404
1405         dev->data->dev_started = 0;
1406         (*dev->dev_ops->dev_stop)(dev);
1407 }
1408
1409 int
1410 rte_eth_dev_set_link_up(uint8_t port_id)
1411 {
1412         struct rte_eth_dev *dev;
1413
1414         /* This function is only safe when called from the primary process
1415          * in a multi-process setup*/
1416         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
1417
1418         if (!rte_eth_dev_is_valid_port(port_id)) {
1419                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1420                 return -EINVAL;
1421         }
1422
1423         dev = &rte_eth_devices[port_id];
1424
1425         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1426         return (*dev->dev_ops->dev_set_link_up)(dev);
1427 }
1428
1429 int
1430 rte_eth_dev_set_link_down(uint8_t port_id)
1431 {
1432         struct rte_eth_dev *dev;
1433
1434         /* This function is only safe when called from the primary process
1435          * in a multi-process setup*/
1436         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
1437
1438         if (!rte_eth_dev_is_valid_port(port_id)) {
1439                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1440                 return -EINVAL;
1441         }
1442
1443         dev = &rte_eth_devices[port_id];
1444
1445         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1446         return (*dev->dev_ops->dev_set_link_down)(dev);
1447 }
1448
1449 void
1450 rte_eth_dev_close(uint8_t port_id)
1451 {
1452         struct rte_eth_dev *dev;
1453
1454         /* This function is only safe when called from the primary process
1455          * in a multi-process setup*/
1456         PROC_PRIMARY_OR_RET();
1457
1458         if (!rte_eth_dev_is_valid_port(port_id)) {
1459                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1460                 return;
1461         }
1462
1463         dev = &rte_eth_devices[port_id];
1464
1465         FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
1466         dev->data->dev_started = 0;
1467         (*dev->dev_ops->dev_close)(dev);
1468 }
1469
/*
 * Set up an RX queue on a stopped device: validate the port and queue id,
 * check that the supplied mempool carries pktmbuf private data and that
 * its buffers are large enough for the device's minimum RX buffer size,
 * then invoke the driver's rx_queue_setup op.  A NULL rx_conf selects the
 * driver's default RX configuration.  On success, dev->data's
 * min_rx_buf_size is lowered to this pool's buffer size if smaller.
 */
int
rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
		       uint16_t nb_rx_desc, unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf,
		       struct rte_mempool *mp)
{
	int ret;
	uint32_t mbp_buf_size;
	struct rte_eth_dev *dev;
	struct rte_pktmbuf_pool_private *mbp_priv;
	struct rte_eth_dev_info dev_info;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup*/
	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

	if (!rte_eth_dev_is_valid_port(port_id)) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-EINVAL);
	}

	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
		return (-EINVAL);
	}

	/* Queues may only be (re)configured on a stopped port. */
	if (dev->data->dev_started) {
		PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n", port_id);
		return -EBUSY;
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);

	/*
	 * Check the size of the mbuf data buffer.
	 * This value must be provided in the private data of the memory pool.
	 * First check that the memory pool has a valid private data.
	 */
	rte_eth_dev_info_get(port_id, &dev_info);
	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
		PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
				mp->name, (int) mp->private_data_size,
				(int) sizeof(struct rte_pktmbuf_pool_private));
		return (-ENOSPC);
	}
	mbp_priv = rte_mempool_get_priv(mp);
	mbp_buf_size = mbp_priv->mbuf_data_room_size;

	/* The usable area after the headroom must cover the device's
	 * minimum RX buffer size. */
	if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
		PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
				"(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
				"=%d)\n",
				mp->name,
				(int)mbp_buf_size,
				(int)(RTE_PKTMBUF_HEADROOM +
				      dev_info.min_rx_bufsize),
				(int)RTE_PKTMBUF_HEADROOM,
				(int)dev_info.min_rx_bufsize);
		return (-EINVAL);
	}

	if (rx_conf == NULL)
		rx_conf = &dev_info.default_rxconf;

	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
					      socket_id, rx_conf, mp);
	if (!ret) {
		/* Track the smallest RX buffer size seen across queues. */
		if (!dev->data->min_rx_buf_size ||
		    dev->data->min_rx_buf_size > mbp_buf_size)
			dev->data->min_rx_buf_size = mbp_buf_size;
	}

	return ret;
}
1547
1548 int
1549 rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
1550                        uint16_t nb_tx_desc, unsigned int socket_id,
1551                        const struct rte_eth_txconf *tx_conf)
1552 {
1553         struct rte_eth_dev *dev;
1554         struct rte_eth_dev_info dev_info;
1555
1556         /* This function is only safe when called from the primary process
1557          * in a multi-process setup*/
1558         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
1559
1560         if (!rte_eth_dev_is_valid_port(port_id)) {
1561                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1562                 return (-EINVAL);
1563         }
1564
1565         dev = &rte_eth_devices[port_id];
1566         if (tx_queue_id >= dev->data->nb_tx_queues) {
1567                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
1568                 return (-EINVAL);
1569         }
1570
1571         if (dev->data->dev_started) {
1572                 PMD_DEBUG_TRACE(
1573                     "port %d must be stopped to allow configuration\n", port_id);
1574                 return -EBUSY;
1575         }
1576
1577         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1578         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
1579
1580         rte_eth_dev_info_get(port_id, &dev_info);
1581
1582         if (tx_conf == NULL)
1583                 tx_conf = &dev_info.default_txconf;
1584
1585         return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
1586                                                socket_id, tx_conf);
1587 }
1588
1589 void
1590 rte_eth_promiscuous_enable(uint8_t port_id)
1591 {
1592         struct rte_eth_dev *dev;
1593
1594         if (!rte_eth_dev_is_valid_port(port_id)) {
1595                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1596                 return;
1597         }
1598
1599         dev = &rte_eth_devices[port_id];
1600
1601         FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
1602         (*dev->dev_ops->promiscuous_enable)(dev);
1603         dev->data->promiscuous = 1;
1604 }
1605
1606 void
1607 rte_eth_promiscuous_disable(uint8_t port_id)
1608 {
1609         struct rte_eth_dev *dev;
1610
1611         if (!rte_eth_dev_is_valid_port(port_id)) {
1612                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1613                 return;
1614         }
1615
1616         dev = &rte_eth_devices[port_id];
1617
1618         FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
1619         dev->data->promiscuous = 0;
1620         (*dev->dev_ops->promiscuous_disable)(dev);
1621 }
1622
1623 int
1624 rte_eth_promiscuous_get(uint8_t port_id)
1625 {
1626         struct rte_eth_dev *dev;
1627
1628         if (!rte_eth_dev_is_valid_port(port_id)) {
1629                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1630                 return -1;
1631         }
1632
1633         dev = &rte_eth_devices[port_id];
1634         return dev->data->promiscuous;
1635 }
1636
1637 void
1638 rte_eth_allmulticast_enable(uint8_t port_id)
1639 {
1640         struct rte_eth_dev *dev;
1641
1642         if (!rte_eth_dev_is_valid_port(port_id)) {
1643                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1644                 return;
1645         }
1646
1647         dev = &rte_eth_devices[port_id];
1648
1649         FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
1650         (*dev->dev_ops->allmulticast_enable)(dev);
1651         dev->data->all_multicast = 1;
1652 }
1653
1654 void
1655 rte_eth_allmulticast_disable(uint8_t port_id)
1656 {
1657         struct rte_eth_dev *dev;
1658
1659         if (!rte_eth_dev_is_valid_port(port_id)) {
1660                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1661                 return;
1662         }
1663
1664         dev = &rte_eth_devices[port_id];
1665
1666         FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
1667         dev->data->all_multicast = 0;
1668         (*dev->dev_ops->allmulticast_disable)(dev);
1669 }
1670
1671 int
1672 rte_eth_allmulticast_get(uint8_t port_id)
1673 {
1674         struct rte_eth_dev *dev;
1675
1676         if (!rte_eth_dev_is_valid_port(port_id)) {
1677                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1678                 return -1;
1679         }
1680
1681         dev = &rte_eth_devices[port_id];
1682         return dev->data->all_multicast;
1683 }
1684
1685 static inline int
1686 rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
1687                                 struct rte_eth_link *link)
1688 {
1689         struct rte_eth_link *dst = link;
1690         struct rte_eth_link *src = &(dev->data->dev_link);
1691
1692         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
1693                                         *(uint64_t *)src) == 0)
1694                 return -1;
1695
1696         return 0;
1697 }
1698
1699 void
1700 rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
1701 {
1702         struct rte_eth_dev *dev;
1703
1704         if (!rte_eth_dev_is_valid_port(port_id)) {
1705                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1706                 return;
1707         }
1708
1709         dev = &rte_eth_devices[port_id];
1710
1711         if (dev->data->dev_conf.intr_conf.lsc != 0)
1712                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1713         else {
1714                 FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1715                 (*dev->dev_ops->link_update)(dev, 1);
1716                 *eth_link = dev->data->dev_link;
1717         }
1718 }
1719
1720 void
1721 rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
1722 {
1723         struct rte_eth_dev *dev;
1724
1725         if (!rte_eth_dev_is_valid_port(port_id)) {
1726                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1727                 return;
1728         }
1729
1730         dev = &rte_eth_devices[port_id];
1731
1732         if (dev->data->dev_conf.intr_conf.lsc != 0)
1733                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1734         else {
1735                 FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1736                 (*dev->dev_ops->link_update)(dev, 0);
1737                 *eth_link = dev->data->dev_link;
1738         }
1739 }
1740
1741 int
1742 rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
1743 {
1744         struct rte_eth_dev *dev;
1745
1746         if (!rte_eth_dev_is_valid_port(port_id)) {
1747                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1748                 return (-ENODEV);
1749         }
1750
1751         dev = &rte_eth_devices[port_id];
1752         memset(stats, 0, sizeof(*stats));
1753
1754         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1755         (*dev->dev_ops->stats_get)(dev, stats);
1756         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1757         return 0;
1758 }
1759
1760 void
1761 rte_eth_stats_reset(uint8_t port_id)
1762 {
1763         struct rte_eth_dev *dev;
1764
1765         if (!rte_eth_dev_is_valid_port(port_id)) {
1766                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1767                 return;
1768         }
1769
1770         dev = &rte_eth_devices[port_id];
1771
1772         FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
1773         (*dev->dev_ops->stats_reset)(dev);
1774 }
1775
1776 /* retrieve ethdev extended statistics */
1777 int
1778 rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstats *xstats,
1779         unsigned n)
1780 {
1781         struct rte_eth_stats eth_stats;
1782         struct rte_eth_dev *dev;
1783         unsigned count, i, q;
1784         uint64_t val;
1785         char *stats_ptr;
1786
1787         if (!rte_eth_dev_is_valid_port(port_id)) {
1788                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1789                 return -1;
1790         }
1791
1792         dev = &rte_eth_devices[port_id];
1793
1794         /* implemented by the driver */
1795         if (dev->dev_ops->xstats_get != NULL)
1796                 return (*dev->dev_ops->xstats_get)(dev, xstats, n);
1797
1798         /* else, return generic statistics */
1799         count = RTE_NB_STATS;
1800         count += dev->data->nb_rx_queues * RTE_NB_RXQ_STATS;
1801         count += dev->data->nb_tx_queues * RTE_NB_TXQ_STATS;
1802         if (n < count)
1803                 return count;
1804
1805         /* now fill the xstats structure */
1806
1807         count = 0;
1808         memset(&eth_stats, 0, sizeof(eth_stats));
1809         rte_eth_stats_get(port_id, &eth_stats);
1810
1811         /* global stats */
1812         for (i = 0; i < RTE_NB_STATS; i++) {
1813                 stats_ptr = (char *)&eth_stats + rte_stats_strings[i].offset;
1814                 val = *(uint64_t *)stats_ptr;
1815                 snprintf(xstats[count].name, sizeof(xstats[count].name),
1816                         "%s", rte_stats_strings[i].name);
1817                 xstats[count++].value = val;
1818         }
1819
1820         /* per-rxq stats */
1821         for (q = 0; q < dev->data->nb_rx_queues; q++) {
1822                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
1823                         stats_ptr = (char *)&eth_stats;
1824                         stats_ptr += rte_rxq_stats_strings[i].offset;
1825                         stats_ptr += q * sizeof(uint64_t);
1826                         val = *(uint64_t *)stats_ptr;
1827                         snprintf(xstats[count].name, sizeof(xstats[count].name),
1828                                 "rx_queue_%u_%s", q,
1829                                 rte_rxq_stats_strings[i].name);
1830                         xstats[count++].value = val;
1831                 }
1832         }
1833
1834         /* per-txq stats */
1835         for (q = 0; q < dev->data->nb_tx_queues; q++) {
1836                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
1837                         stats_ptr = (char *)&eth_stats;
1838                         stats_ptr += rte_txq_stats_strings[i].offset;
1839                         stats_ptr += q * sizeof(uint64_t);
1840                         val = *(uint64_t *)stats_ptr;
1841                         snprintf(xstats[count].name, sizeof(xstats[count].name),
1842                                 "tx_queue_%u_%s", q,
1843                                 rte_txq_stats_strings[i].name);
1844                         xstats[count++].value = val;
1845                 }
1846         }
1847
1848         return count;
1849 }
1850
1851 /* reset ethdev extended statistics */
1852 void
1853 rte_eth_xstats_reset(uint8_t port_id)
1854 {
1855         struct rte_eth_dev *dev;
1856
1857         if (!rte_eth_dev_is_valid_port(port_id)) {
1858                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1859                 return;
1860         }
1861
1862         dev = &rte_eth_devices[port_id];
1863
1864         /* implemented by the driver */
1865         if (dev->dev_ops->xstats_reset != NULL) {
1866                 (*dev->dev_ops->xstats_reset)(dev);
1867                 return;
1868         }
1869
1870         /* fallback to default */
1871         rte_eth_stats_reset(port_id);
1872 }
1873
1874 static int
1875 set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
1876                 uint8_t is_rx)
1877 {
1878         struct rte_eth_dev *dev;
1879
1880         if (!rte_eth_dev_is_valid_port(port_id)) {
1881                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1882                 return -ENODEV;
1883         }
1884
1885         dev = &rte_eth_devices[port_id];
1886
1887         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
1888         return (*dev->dev_ops->queue_stats_mapping_set)
1889                         (dev, queue_id, stat_idx, is_rx);
1890 }
1891
1892
1893 int
1894 rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
1895                 uint8_t stat_idx)
1896 {
1897         return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
1898                         STAT_QMAP_TX);
1899 }
1900
1901
1902 int
1903 rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
1904                 uint8_t stat_idx)
1905 {
1906         return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
1907                         STAT_QMAP_RX);
1908 }
1909
1910
1911 void
1912 rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
1913 {
1914         struct rte_eth_dev *dev;
1915
1916         if (!rte_eth_dev_is_valid_port(port_id)) {
1917                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1918                 return;
1919         }
1920
1921         dev = &rte_eth_devices[port_id];
1922
1923         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
1924
1925         FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
1926         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
1927         dev_info->pci_dev = dev->pci_dev;
1928         if (dev->driver)
1929                 dev_info->driver_name = dev->driver->pci_drv.name;
1930 }
1931
1932 void
1933 rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
1934 {
1935         struct rte_eth_dev *dev;
1936
1937         if (!rte_eth_dev_is_valid_port(port_id)) {
1938                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1939                 return;
1940         }
1941
1942         dev = &rte_eth_devices[port_id];
1943         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
1944 }
1945
1946
1947 int
1948 rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu)
1949 {
1950         struct rte_eth_dev *dev;
1951
1952         if (!rte_eth_dev_is_valid_port(port_id)) {
1953                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1954                 return (-ENODEV);
1955         }
1956
1957         dev = &rte_eth_devices[port_id];
1958         *mtu = dev->data->mtu;
1959         return 0;
1960 }
1961
1962 int
1963 rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu)
1964 {
1965         int ret;
1966         struct rte_eth_dev *dev;
1967
1968         if (!rte_eth_dev_is_valid_port(port_id)) {
1969                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1970                 return (-ENODEV);
1971         }
1972
1973         dev = &rte_eth_devices[port_id];
1974         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
1975
1976         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
1977         if (!ret)
1978                 dev->data->mtu = mtu;
1979
1980         return ret;
1981 }
1982
1983 int
1984 rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
1985 {
1986         struct rte_eth_dev *dev;
1987
1988         if (!rte_eth_dev_is_valid_port(port_id)) {
1989                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1990                 return (-ENODEV);
1991         }
1992
1993         dev = &rte_eth_devices[port_id];
1994         if (! (dev->data->dev_conf.rxmode.hw_vlan_filter)) {
1995                 PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
1996                 return (-ENOSYS);
1997         }
1998
1999         if (vlan_id > 4095) {
2000                 PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
2001                                 port_id, (unsigned) vlan_id);
2002                 return (-EINVAL);
2003         }
2004         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
2005
2006         return (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
2007 }
2008
2009 int
2010 rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on)
2011 {
2012         struct rte_eth_dev *dev;
2013
2014         if (!rte_eth_dev_is_valid_port(port_id)) {
2015                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2016                 return (-ENODEV);
2017         }
2018
2019         dev = &rte_eth_devices[port_id];
2020         if (rx_queue_id >= dev->data->nb_rx_queues) {
2021                 PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", port_id);
2022                 return (-EINVAL);
2023         }
2024
2025         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
2026         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
2027
2028         return (0);
2029 }
2030
2031 int
2032 rte_eth_dev_set_vlan_ether_type(uint8_t port_id, uint16_t tpid)
2033 {
2034         struct rte_eth_dev *dev;
2035
2036         if (!rte_eth_dev_is_valid_port(port_id)) {
2037                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2038                 return (-ENODEV);
2039         }
2040
2041         dev = &rte_eth_devices[port_id];
2042         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
2043         (*dev->dev_ops->vlan_tpid_set)(dev, tpid);
2044
2045         return (0);
2046 }
2047
2048 int
2049 rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
2050 {
2051         struct rte_eth_dev *dev;
2052         int ret = 0;
2053         int mask = 0;
2054         int cur, org = 0;
2055
2056         if (!rte_eth_dev_is_valid_port(port_id)) {
2057                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2058                 return (-ENODEV);
2059         }
2060
2061         dev = &rte_eth_devices[port_id];
2062
2063         /*check which option changed by application*/
2064         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
2065         org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
2066         if (cur != org){
2067                 dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
2068                 mask |= ETH_VLAN_STRIP_MASK;
2069         }
2070
2071         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
2072         org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
2073         if (cur != org){
2074                 dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
2075                 mask |= ETH_VLAN_FILTER_MASK;
2076         }
2077
2078         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
2079         org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
2080         if (cur != org){
2081                 dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
2082                 mask |= ETH_VLAN_EXTEND_MASK;
2083         }
2084
2085         /*no change*/
2086         if(mask == 0)
2087                 return ret;
2088
2089         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
2090         (*dev->dev_ops->vlan_offload_set)(dev, mask);
2091
2092         return ret;
2093 }
2094
2095 int
2096 rte_eth_dev_get_vlan_offload(uint8_t port_id)
2097 {
2098         struct rte_eth_dev *dev;
2099         int ret = 0;
2100
2101         if (!rte_eth_dev_is_valid_port(port_id)) {
2102                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2103                 return (-ENODEV);
2104         }
2105
2106         dev = &rte_eth_devices[port_id];
2107
2108         if (dev->data->dev_conf.rxmode.hw_vlan_strip)
2109                 ret |= ETH_VLAN_STRIP_OFFLOAD ;
2110
2111         if (dev->data->dev_conf.rxmode.hw_vlan_filter)
2112                 ret |= ETH_VLAN_FILTER_OFFLOAD ;
2113
2114         if (dev->data->dev_conf.rxmode.hw_vlan_extend)
2115                 ret |= ETH_VLAN_EXTEND_OFFLOAD ;
2116
2117         return ret;
2118 }
2119
2120 int
2121 rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on)
2122 {
2123         struct rte_eth_dev *dev;
2124
2125         if (!rte_eth_dev_is_valid_port(port_id)) {
2126                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2127                 return (-ENODEV);
2128         }
2129
2130         dev = &rte_eth_devices[port_id];
2131         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
2132         (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on);
2133
2134         return 0;
2135 }
2136
2137 int
2138 rte_eth_dev_fdir_add_signature_filter(uint8_t port_id,
2139                                       struct rte_fdir_filter *fdir_filter,
2140                                       uint8_t queue)
2141 {
2142         struct rte_eth_dev *dev;
2143
2144         if (!rte_eth_dev_is_valid_port(port_id)) {
2145                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2146                 return (-ENODEV);
2147         }
2148
2149         dev = &rte_eth_devices[port_id];
2150
2151         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
2152                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
2153                                 port_id, dev->data->dev_conf.fdir_conf.mode);
2154                 return (-ENOSYS);
2155         }
2156
2157         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
2158              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
2159             && (fdir_filter->port_src || fdir_filter->port_dst)) {
2160                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
2161                                 "None l4type, source & destinations ports " \
2162                                 "should be null!\n");
2163                 return (-EINVAL);
2164         }
2165
2166         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_signature_filter, -ENOTSUP);
2167         return (*dev->dev_ops->fdir_add_signature_filter)(dev, fdir_filter,
2168                                                                 queue);
2169 }
2170
2171 int
2172 rte_eth_dev_fdir_update_signature_filter(uint8_t port_id,
2173                                          struct rte_fdir_filter *fdir_filter,
2174                                          uint8_t queue)
2175 {
2176         struct rte_eth_dev *dev;
2177
2178         if (!rte_eth_dev_is_valid_port(port_id)) {
2179                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2180                 return (-ENODEV);
2181         }
2182
2183         dev = &rte_eth_devices[port_id];
2184
2185         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
2186                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
2187                                 port_id, dev->data->dev_conf.fdir_conf.mode);
2188                 return (-ENOSYS);
2189         }
2190
2191         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
2192              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
2193             && (fdir_filter->port_src || fdir_filter->port_dst)) {
2194                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
2195                                 "None l4type, source & destinations ports " \
2196                                 "should be null!\n");
2197                 return (-EINVAL);
2198         }
2199
2200         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_signature_filter, -ENOTSUP);
2201         return (*dev->dev_ops->fdir_update_signature_filter)(dev, fdir_filter,
2202                                                                 queue);
2203
2204 }
2205
2206 int
2207 rte_eth_dev_fdir_remove_signature_filter(uint8_t port_id,
2208                                          struct rte_fdir_filter *fdir_filter)
2209 {
2210         struct rte_eth_dev *dev;
2211
2212         if (!rte_eth_dev_is_valid_port(port_id)) {
2213                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2214                 return (-ENODEV);
2215         }
2216
2217         dev = &rte_eth_devices[port_id];
2218
2219         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
2220                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
2221                                 port_id, dev->data->dev_conf.fdir_conf.mode);
2222                 return (-ENOSYS);
2223         }
2224
2225         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
2226              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
2227             && (fdir_filter->port_src || fdir_filter->port_dst)) {
2228                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
2229                                 "None l4type source & destinations ports " \
2230                                 "should be null!\n");
2231                 return (-EINVAL);
2232         }
2233
2234         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_signature_filter, -ENOTSUP);
2235         return (*dev->dev_ops->fdir_remove_signature_filter)(dev, fdir_filter);
2236 }
2237
2238 int
2239 rte_eth_dev_fdir_get_infos(uint8_t port_id, struct rte_eth_fdir *fdir)
2240 {
2241         struct rte_eth_dev *dev;
2242
2243         if (!rte_eth_dev_is_valid_port(port_id)) {
2244                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2245                 return (-ENODEV);
2246         }
2247
2248         dev = &rte_eth_devices[port_id];
2249         if (! (dev->data->dev_conf.fdir_conf.mode)) {
2250                 PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
2251                 return (-ENOSYS);
2252         }
2253
2254         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_infos_get, -ENOTSUP);
2255
2256         (*dev->dev_ops->fdir_infos_get)(dev, fdir);
2257         return (0);
2258 }
2259
2260 int
2261 rte_eth_dev_fdir_add_perfect_filter(uint8_t port_id,
2262                                     struct rte_fdir_filter *fdir_filter,
2263                                     uint16_t soft_id, uint8_t queue,
2264                                     uint8_t drop)
2265 {
2266         struct rte_eth_dev *dev;
2267
2268         if (!rte_eth_dev_is_valid_port(port_id)) {
2269                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2270                 return (-ENODEV);
2271         }
2272
2273         dev = &rte_eth_devices[port_id];
2274
2275         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
2276                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
2277                                 port_id, dev->data->dev_conf.fdir_conf.mode);
2278                 return (-ENOSYS);
2279         }
2280
2281         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
2282              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
2283             && (fdir_filter->port_src || fdir_filter->port_dst)) {
2284                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
2285                                 "None l4type, source & destinations ports " \
2286                                 "should be null!\n");
2287                 return (-EINVAL);
2288         }
2289
2290         /* For now IPv6 is not supported with perfect filter */
2291         if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
2292                 return (-ENOTSUP);
2293
2294         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_perfect_filter, -ENOTSUP);
2295         return (*dev->dev_ops->fdir_add_perfect_filter)(dev, fdir_filter,
2296                                                                 soft_id, queue,
2297                                                                 drop);
2298 }
2299
2300 int
2301 rte_eth_dev_fdir_update_perfect_filter(uint8_t port_id,
2302                                        struct rte_fdir_filter *fdir_filter,
2303                                        uint16_t soft_id, uint8_t queue,
2304                                        uint8_t drop)
2305 {
2306         struct rte_eth_dev *dev;
2307
2308         if (!rte_eth_dev_is_valid_port(port_id)) {
2309                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2310                 return (-ENODEV);
2311         }
2312
2313         dev = &rte_eth_devices[port_id];
2314
2315         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
2316                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
2317                                 port_id, dev->data->dev_conf.fdir_conf.mode);
2318                 return (-ENOSYS);
2319         }
2320
2321         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
2322              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
2323             && (fdir_filter->port_src || fdir_filter->port_dst)) {
2324                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
2325                                 "None l4type, source & destinations ports " \
2326                                 "should be null!\n");
2327                 return (-EINVAL);
2328         }
2329
2330         /* For now IPv6 is not supported with perfect filter */
2331         if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
2332                 return (-ENOTSUP);
2333
2334         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_perfect_filter, -ENOTSUP);
2335         return (*dev->dev_ops->fdir_update_perfect_filter)(dev, fdir_filter,
2336                                                         soft_id, queue, drop);
2337 }
2338
2339 int
2340 rte_eth_dev_fdir_remove_perfect_filter(uint8_t port_id,
2341                                        struct rte_fdir_filter *fdir_filter,
2342                                        uint16_t soft_id)
2343 {
2344         struct rte_eth_dev *dev;
2345
2346         if (!rte_eth_dev_is_valid_port(port_id)) {
2347                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2348                 return (-ENODEV);
2349         }
2350
2351         dev = &rte_eth_devices[port_id];
2352
2353         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
2354                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
2355                                 port_id, dev->data->dev_conf.fdir_conf.mode);
2356                 return (-ENOSYS);
2357         }
2358
2359         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
2360              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
2361             && (fdir_filter->port_src || fdir_filter->port_dst)) {
2362                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
2363                                 "None l4type, source & destinations ports " \
2364                                 "should be null!\n");
2365                 return (-EINVAL);
2366         }
2367
2368         /* For now IPv6 is not supported with perfect filter */
2369         if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
2370                 return (-ENOTSUP);
2371
2372         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_perfect_filter, -ENOTSUP);
2373         return (*dev->dev_ops->fdir_remove_perfect_filter)(dev, fdir_filter,
2374                                                                 soft_id);
2375 }
2376
2377 int
2378 rte_eth_dev_fdir_set_masks(uint8_t port_id, struct rte_fdir_masks *fdir_mask)
2379 {
2380         struct rte_eth_dev *dev;
2381
2382         if (!rte_eth_dev_is_valid_port(port_id)) {
2383                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2384                 return (-ENODEV);
2385         }
2386
2387         dev = &rte_eth_devices[port_id];
2388         if (! (dev->data->dev_conf.fdir_conf.mode)) {
2389                 PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
2390                 return (-ENOSYS);
2391         }
2392
2393         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_set_masks, -ENOTSUP);
2394         return (*dev->dev_ops->fdir_set_masks)(dev, fdir_mask);
2395 }
2396
2397 int
2398 rte_eth_dev_flow_ctrl_get(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
2399 {
2400         struct rte_eth_dev *dev;
2401
2402         if (!rte_eth_dev_is_valid_port(port_id)) {
2403                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2404                 return (-ENODEV);
2405         }
2406
2407         dev = &rte_eth_devices[port_id];
2408         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
2409         memset(fc_conf, 0, sizeof(*fc_conf));
2410         return (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf);
2411 }
2412
2413 int
2414 rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
2415 {
2416         struct rte_eth_dev *dev;
2417
2418         if (!rte_eth_dev_is_valid_port(port_id)) {
2419                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2420                 return (-ENODEV);
2421         }
2422
2423         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
2424                 PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
2425                 return (-EINVAL);
2426         }
2427
2428         dev = &rte_eth_devices[port_id];
2429         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
2430         return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
2431 }
2432
2433 int
2434 rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc_conf)
2435 {
2436         struct rte_eth_dev *dev;
2437
2438         if (!rte_eth_dev_is_valid_port(port_id)) {
2439                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2440                 return (-ENODEV);
2441         }
2442
2443         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
2444                 PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
2445                 return (-EINVAL);
2446         }
2447
2448         dev = &rte_eth_devices[port_id];
2449         /* High water, low water validation are device specific */
2450         if  (*dev->dev_ops->priority_flow_ctrl_set)
2451                 return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
2452         return (-ENOTSUP);
2453 }
2454
2455 static inline int
2456 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
2457                         uint16_t reta_size)
2458 {
2459         uint16_t i, num;
2460
2461         if (!reta_conf)
2462                 return -EINVAL;
2463
2464         if (reta_size != RTE_ALIGN(reta_size, RTE_RETA_GROUP_SIZE)) {
2465                 PMD_DEBUG_TRACE("Invalid reta size, should be %u aligned\n",
2466                                                         RTE_RETA_GROUP_SIZE);
2467                 return -EINVAL;
2468         }
2469
2470         num = reta_size / RTE_RETA_GROUP_SIZE;
2471         for (i = 0; i < num; i++) {
2472                 if (reta_conf[i].mask)
2473                         return 0;
2474         }
2475
2476         return -EINVAL;
2477 }
2478
2479 static inline int
2480 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
2481                          uint16_t reta_size,
2482                          uint8_t max_rxq)
2483 {
2484         uint16_t i, idx, shift;
2485
2486         if (!reta_conf)
2487                 return -EINVAL;
2488
2489         if (max_rxq == 0) {
2490                 PMD_DEBUG_TRACE("No receive queue is available\n");
2491                 return -EINVAL;
2492         }
2493
2494         for (i = 0; i < reta_size; i++) {
2495                 idx = i / RTE_RETA_GROUP_SIZE;
2496                 shift = i % RTE_RETA_GROUP_SIZE;
2497                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
2498                         (reta_conf[idx].reta[shift] >= max_rxq)) {
2499                         PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
2500                                 "the maximum rxq index: %u\n", idx, shift,
2501                                 reta_conf[idx].reta[shift], max_rxq);
2502                         return -EINVAL;
2503                 }
2504         }
2505
2506         return 0;
2507 }
2508
2509 int
2510 rte_eth_dev_rss_reta_update(uint8_t port_id,
2511                             struct rte_eth_rss_reta_entry64 *reta_conf,
2512                             uint16_t reta_size)
2513 {
2514         struct rte_eth_dev *dev;
2515         int ret;
2516
2517         if (!rte_eth_dev_is_valid_port(port_id)) {
2518                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2519                 return -ENODEV;
2520         }
2521
2522         /* Check mask bits */
2523         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2524         if (ret < 0)
2525                 return ret;
2526
2527         dev = &rte_eth_devices[port_id];
2528
2529         /* Check entry value */
2530         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
2531                                 dev->data->nb_rx_queues);
2532         if (ret < 0)
2533                 return ret;
2534
2535         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
2536         return (*dev->dev_ops->reta_update)(dev, reta_conf, reta_size);
2537 }
2538
2539 int
2540 rte_eth_dev_rss_reta_query(uint8_t port_id,
2541                            struct rte_eth_rss_reta_entry64 *reta_conf,
2542                            uint16_t reta_size)
2543 {
2544         struct rte_eth_dev *dev;
2545         int ret;
2546
2547         if (port_id >= nb_ports) {
2548                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2549                 return -ENODEV;
2550         }
2551
2552         /* Check mask bits */
2553         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2554         if (ret < 0)
2555                 return ret;
2556
2557         dev = &rte_eth_devices[port_id];
2558         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
2559         return (*dev->dev_ops->reta_query)(dev, reta_conf, reta_size);
2560 }
2561
2562 int
2563 rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf)
2564 {
2565         struct rte_eth_dev *dev;
2566         uint16_t rss_hash_protos;
2567
2568         if (!rte_eth_dev_is_valid_port(port_id)) {
2569                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2570                 return (-ENODEV);
2571         }
2572
2573         rss_hash_protos = rss_conf->rss_hf;
2574         if ((rss_hash_protos != 0) &&
2575             ((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) {
2576                 PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%x\n",
2577                                 rss_hash_protos);
2578                 return (-EINVAL);
2579         }
2580         dev = &rte_eth_devices[port_id];
2581         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
2582         return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
2583 }
2584
2585 int
2586 rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
2587                               struct rte_eth_rss_conf *rss_conf)
2588 {
2589         struct rte_eth_dev *dev;
2590
2591         if (!rte_eth_dev_is_valid_port(port_id)) {
2592                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2593                 return (-ENODEV);
2594         }
2595
2596         dev = &rte_eth_devices[port_id];
2597         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
2598         return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
2599 }
2600
2601 int
2602 rte_eth_dev_udp_tunnel_add(uint8_t port_id,
2603                            struct rte_eth_udp_tunnel *udp_tunnel)
2604 {
2605         struct rte_eth_dev *dev;
2606
2607         if (!rte_eth_dev_is_valid_port(port_id)) {
2608                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2609                 return -ENODEV;
2610         }
2611
2612         if (udp_tunnel == NULL) {
2613                 PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2614                 return -EINVAL;
2615         }
2616
2617         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2618                 PMD_DEBUG_TRACE("Invalid tunnel type\n");
2619                 return -EINVAL;
2620         }
2621
2622         dev = &rte_eth_devices[port_id];
2623         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_add, -ENOTSUP);
2624         return (*dev->dev_ops->udp_tunnel_add)(dev, udp_tunnel);
2625 }
2626
2627 int
2628 rte_eth_dev_udp_tunnel_delete(uint8_t port_id,
2629                               struct rte_eth_udp_tunnel *udp_tunnel)
2630 {
2631         struct rte_eth_dev *dev;
2632
2633         if (!rte_eth_dev_is_valid_port(port_id)) {
2634                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2635                 return -ENODEV;
2636         }
2637
2638         dev = &rte_eth_devices[port_id];
2639
2640         if (udp_tunnel == NULL) {
2641                 PMD_DEBUG_TRACE("Invalid udp_tunnel parametr\n");
2642                 return -EINVAL;
2643         }
2644
2645         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2646                 PMD_DEBUG_TRACE("Invalid tunnel type\n");
2647                 return -EINVAL;
2648         }
2649
2650         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_del, -ENOTSUP);
2651         return (*dev->dev_ops->udp_tunnel_del)(dev, udp_tunnel);
2652 }
2653
2654 int
2655 rte_eth_led_on(uint8_t port_id)
2656 {
2657         struct rte_eth_dev *dev;
2658
2659         if (!rte_eth_dev_is_valid_port(port_id)) {
2660                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2661                 return (-ENODEV);
2662         }
2663
2664         dev = &rte_eth_devices[port_id];
2665         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
2666         return ((*dev->dev_ops->dev_led_on)(dev));
2667 }
2668
2669 int
2670 rte_eth_led_off(uint8_t port_id)
2671 {
2672         struct rte_eth_dev *dev;
2673
2674         if (!rte_eth_dev_is_valid_port(port_id)) {
2675                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2676                 return (-ENODEV);
2677         }
2678
2679         dev = &rte_eth_devices[port_id];
2680         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
2681         return ((*dev->dev_ops->dev_led_off)(dev));
2682 }
2683
2684 /*
2685  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2686  * an empty spot.
2687  */
2688 static inline int
2689 get_mac_addr_index(uint8_t port_id, struct ether_addr *addr)
2690 {
2691         struct rte_eth_dev_info dev_info;
2692         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2693         unsigned i;
2694
2695         rte_eth_dev_info_get(port_id, &dev_info);
2696
2697         for (i = 0; i < dev_info.max_mac_addrs; i++)
2698                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
2699                         return i;
2700
2701         return -1;
2702 }
2703
/* All-zero MAC address: marks a free slot in the per-port address arrays. */
static struct ether_addr null_mac_addr = {{0, 0, 0, 0, 0, 0}};
2705
/*
 * Add a MAC address to a port and associate it with a VMDq pool.
 *
 * The address is rejected if it is all-zero or the pool id is out of
 * range.  If the address is already present, only the pool bitmap is
 * extended; if absent, it is stored in the first free (all-zero) slot.
 *
 * Returns 0 on success, -ENODEV for an unknown port, -ENOTSUP when the
 * PMD has no mac_addr_add handler, -EINVAL for bad arguments, -ENOSPC
 * when the address table is full.
 */
int
rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
			uint32_t pool)
{
	struct rte_eth_dev *dev;
	int index;
	uint64_t pool_mask;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-ENODEV);
	}

	dev = &rte_eth_devices[port_id];
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);

	if (is_zero_ether_addr(addr)) {
		PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
			port_id);
		return (-EINVAL);
	}
	if (pool >= ETH_64_POOLS) {
		PMD_DEBUG_TRACE("pool id must be 0-%d\n",ETH_64_POOLS - 1);
		return (-EINVAL);
	}

	/* Reuse the slot of an existing entry, or claim a free one. */
	index = get_mac_addr_index(port_id, addr);
	if (index < 0) {
		index = get_mac_addr_index(port_id, &null_mac_addr);
		if (index < 0) {
			PMD_DEBUG_TRACE("port %d: MAC address array full\n",
				port_id);
			return (-ENOSPC);
		}
	} else {
		pool_mask = dev->data->mac_pool_sel[index];

		/* Check if both MAC address and pool is already there, and do nothing */
		if (pool_mask & (1ULL << pool))
			return 0;
	}

	/* Update NIC */
	(*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);

	/* Update address in NIC data structure */
	ether_addr_copy(addr, &dev->data->mac_addrs[index]);

	/* Update pool bitmap in NIC data structure */
	dev->data->mac_pool_sel[index] |= (1ULL << pool);

	return 0;
}
2759
2760 int
2761 rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
2762 {
2763         struct rte_eth_dev *dev;
2764         int index;
2765
2766         if (!rte_eth_dev_is_valid_port(port_id)) {
2767                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2768                 return (-ENODEV);
2769         }
2770
2771         dev = &rte_eth_devices[port_id];
2772         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
2773
2774         index = get_mac_addr_index(port_id, addr);
2775         if (index == 0) {
2776                 PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
2777                 return (-EADDRINUSE);
2778         } else if (index < 0)
2779                 return 0;  /* Do nothing if address wasn't found */
2780
2781         /* Update NIC */
2782         (*dev->dev_ops->mac_addr_remove)(dev, index);
2783
2784         /* Update address in NIC data structure */
2785         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
2786
2787         /* reset pool bitmap */
2788         dev->data->mac_pool_sel[index] = 0;
2789
2790         return 0;
2791 }
2792
2793 int
2794 rte_eth_dev_set_vf_rxmode(uint8_t port_id,  uint16_t vf,
2795                                 uint16_t rx_mode, uint8_t on)
2796 {
2797         uint16_t num_vfs;
2798         struct rte_eth_dev *dev;
2799         struct rte_eth_dev_info dev_info;
2800
2801         if (!rte_eth_dev_is_valid_port(port_id)) {
2802                 PMD_DEBUG_TRACE("set VF RX mode:Invalid port_id=%d\n",
2803                                 port_id);
2804                 return (-ENODEV);
2805         }
2806
2807         dev = &rte_eth_devices[port_id];
2808         rte_eth_dev_info_get(port_id, &dev_info);
2809
2810         num_vfs = dev_info.max_vfs;
2811         if (vf > num_vfs)
2812         {
2813                 PMD_DEBUG_TRACE("set VF RX mode:invalid VF id %d\n", vf);
2814                 return (-EINVAL);
2815         }
2816         if (rx_mode == 0)
2817         {
2818                 PMD_DEBUG_TRACE("set VF RX mode:mode mask ca not be zero\n");
2819                 return (-EINVAL);
2820         }
2821         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP);
2822         return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on);
2823 }
2824
2825 /*
2826  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2827  * an empty spot.
2828  */
2829 static inline int
2830 get_hash_mac_addr_index(uint8_t port_id, struct ether_addr *addr)
2831 {
2832         struct rte_eth_dev_info dev_info;
2833         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2834         unsigned i;
2835
2836         rte_eth_dev_info_get(port_id, &dev_info);
2837         if (!dev->data->hash_mac_addrs)
2838                 return -1;
2839
2840         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
2841                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
2842                         ETHER_ADDR_LEN) == 0)
2843                         return i;
2844
2845         return -1;
2846 }
2847
2848 int
2849 rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
2850                                 uint8_t on)
2851 {
2852         int index;
2853         int ret;
2854         struct rte_eth_dev *dev;
2855
2856         if (!rte_eth_dev_is_valid_port(port_id)) {
2857                 PMD_DEBUG_TRACE("unicast hash setting:Invalid port_id=%d\n",
2858                         port_id);
2859                 return (-ENODEV);
2860         }
2861
2862         dev = &rte_eth_devices[port_id];
2863         if (is_zero_ether_addr(addr)) {
2864                 PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2865                         port_id);
2866                 return (-EINVAL);
2867         }
2868
2869         index = get_hash_mac_addr_index(port_id, addr);
2870         /* Check if it's already there, and do nothing */
2871         if ((index >= 0) && (on))
2872                 return 0;
2873
2874         if (index < 0) {
2875                 if (!on) {
2876                         PMD_DEBUG_TRACE("port %d: the MAC address was not"
2877                                 "set in UTA\n", port_id);
2878                         return (-EINVAL);
2879                 }
2880
2881                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
2882                 if (index < 0) {
2883                         PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2884                                         port_id);
2885                         return (-ENOSPC);
2886                 }
2887         }
2888
2889         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
2890         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
2891         if (ret == 0) {
2892                 /* Update address in NIC data structure */
2893                 if (on)
2894                         ether_addr_copy(addr,
2895                                         &dev->data->hash_mac_addrs[index]);
2896                 else
2897                         ether_addr_copy(&null_mac_addr,
2898                                         &dev->data->hash_mac_addrs[index]);
2899         }
2900
2901         return ret;
2902 }
2903
2904 int
2905 rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
2906 {
2907         struct rte_eth_dev *dev;
2908
2909         if (!rte_eth_dev_is_valid_port(port_id)) {
2910                 PMD_DEBUG_TRACE("unicast hash setting:Invalid port_id=%d\n",
2911                         port_id);
2912                 return (-ENODEV);
2913         }
2914
2915         dev = &rte_eth_devices[port_id];
2916
2917         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
2918         return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
2919 }
2920
2921 int
2922 rte_eth_dev_set_vf_rx(uint8_t port_id,uint16_t vf, uint8_t on)
2923 {
2924         uint16_t num_vfs;
2925         struct rte_eth_dev *dev;
2926         struct rte_eth_dev_info dev_info;
2927
2928         if (!rte_eth_dev_is_valid_port(port_id)) {
2929                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2930                 return (-ENODEV);
2931         }
2932
2933         dev = &rte_eth_devices[port_id];
2934         rte_eth_dev_info_get(port_id, &dev_info);
2935
2936         num_vfs = dev_info.max_vfs;
2937         if (vf > num_vfs)
2938         {
2939                 PMD_DEBUG_TRACE("port %d: invalid vf id\n", port_id);
2940                 return (-EINVAL);
2941         }
2942
2943         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP);
2944         return (*dev->dev_ops->set_vf_rx)(dev, vf,on);
2945 }
2946
2947 int
2948 rte_eth_dev_set_vf_tx(uint8_t port_id,uint16_t vf, uint8_t on)
2949 {
2950         uint16_t num_vfs;
2951         struct rte_eth_dev *dev;
2952         struct rte_eth_dev_info dev_info;
2953
2954         if (!rte_eth_dev_is_valid_port(port_id)) {
2955                 PMD_DEBUG_TRACE("set pool tx:Invalid port_id=%d\n", port_id);
2956                 return (-ENODEV);
2957         }
2958
2959         dev = &rte_eth_devices[port_id];
2960         rte_eth_dev_info_get(port_id, &dev_info);
2961
2962         num_vfs = dev_info.max_vfs;
2963         if (vf > num_vfs)
2964         {
2965                 PMD_DEBUG_TRACE("set pool tx:invalid pool id=%d\n", vf);
2966                 return (-EINVAL);
2967         }
2968
2969         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP);
2970         return (*dev->dev_ops->set_vf_tx)(dev, vf,on);
2971 }
2972
2973 int
2974 rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id,
2975                                  uint64_t vf_mask,uint8_t vlan_on)
2976 {
2977         struct rte_eth_dev *dev;
2978
2979         if (!rte_eth_dev_is_valid_port(port_id)) {
2980                 PMD_DEBUG_TRACE("VF VLAN filter:invalid port id=%d\n",
2981                                 port_id);
2982                 return (-ENODEV);
2983         }
2984         dev = &rte_eth_devices[port_id];
2985
2986         if(vlan_id > ETHER_MAX_VLAN_ID)
2987         {
2988                 PMD_DEBUG_TRACE("VF VLAN filter:invalid VLAN id=%d\n",
2989                         vlan_id);
2990                 return (-EINVAL);
2991         }
2992         if (vf_mask == 0)
2993         {
2994                 PMD_DEBUG_TRACE("VF VLAN filter:pool_mask can not be 0\n");
2995                 return (-EINVAL);
2996         }
2997
2998         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP);
2999         return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id,
3000                                                 vf_mask,vlan_on);
3001 }
3002
3003 int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
3004                                         uint16_t tx_rate)
3005 {
3006         struct rte_eth_dev *dev;
3007         struct rte_eth_dev_info dev_info;
3008         struct rte_eth_link link;
3009
3010         if (!rte_eth_dev_is_valid_port(port_id)) {
3011                 PMD_DEBUG_TRACE("set queue rate limit:invalid port id=%d\n",
3012                                 port_id);
3013                 return -ENODEV;
3014         }
3015
3016         dev = &rte_eth_devices[port_id];
3017         rte_eth_dev_info_get(port_id, &dev_info);
3018         link = dev->data->dev_link;
3019
3020         if (queue_idx > dev_info.max_tx_queues) {
3021                 PMD_DEBUG_TRACE("set queue rate limit:port %d: "
3022                                 "invalid queue id=%d\n", port_id, queue_idx);
3023                 return -EINVAL;
3024         }
3025
3026         if (tx_rate > link.link_speed) {
3027                 PMD_DEBUG_TRACE("set queue rate limit:invalid tx_rate=%d, "
3028                                 "bigger than link speed= %d\n",
3029                         tx_rate, link.link_speed);
3030                 return -EINVAL;
3031         }
3032
3033         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
3034         return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate);
3035 }
3036
3037 int rte_eth_set_vf_rate_limit(uint8_t port_id, uint16_t vf, uint16_t tx_rate,
3038                                 uint64_t q_msk)
3039 {
3040         struct rte_eth_dev *dev;
3041         struct rte_eth_dev_info dev_info;
3042         struct rte_eth_link link;
3043
3044         if (q_msk == 0)
3045                 return 0;
3046
3047         if (!rte_eth_dev_is_valid_port(port_id)) {
3048                 PMD_DEBUG_TRACE("set VF rate limit:invalid port id=%d\n",
3049                                 port_id);
3050                 return -ENODEV;
3051         }
3052
3053         dev = &rte_eth_devices[port_id];
3054         rte_eth_dev_info_get(port_id, &dev_info);
3055         link = dev->data->dev_link;
3056
3057         if (vf > dev_info.max_vfs) {
3058                 PMD_DEBUG_TRACE("set VF rate limit:port %d: "
3059                                 "invalid vf id=%d\n", port_id, vf);
3060                 return -EINVAL;
3061         }
3062
3063         if (tx_rate > link.link_speed) {
3064                 PMD_DEBUG_TRACE("set VF rate limit:invalid tx_rate=%d, "
3065                                 "bigger than link speed= %d\n",
3066                                 tx_rate, link.link_speed);
3067                 return -EINVAL;
3068         }
3069
3070         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rate_limit, -ENOTSUP);
3071         return (*dev->dev_ops->set_vf_rate_limit)(dev, vf, tx_rate, q_msk);
3072 }
3073
3074 int
3075 rte_eth_mirror_rule_set(uint8_t port_id,
3076                         struct rte_eth_vmdq_mirror_conf *mirror_conf,
3077                         uint8_t rule_id, uint8_t on)
3078 {
3079         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3080
3081         if (!rte_eth_dev_is_valid_port(port_id)) {
3082                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3083                 return (-ENODEV);
3084         }
3085
3086         if (mirror_conf->rule_type_mask == 0) {
3087                 PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
3088                 return (-EINVAL);
3089         }
3090
3091         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
3092                 PMD_DEBUG_TRACE("Invalid dst pool, pool id must"
3093                         "be 0-%d\n",ETH_64_POOLS - 1);
3094                 return (-EINVAL);
3095         }
3096
3097         if ((mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) &&
3098                 (mirror_conf->pool_mask == 0)) {
3099                 PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not"
3100                                 "be 0.\n");
3101                 return (-EINVAL);
3102         }
3103
3104         if(rule_id >= ETH_VMDQ_NUM_MIRROR_RULE)
3105         {
3106                 PMD_DEBUG_TRACE("Invalid rule_id, rule_id must be 0-%d\n",
3107                         ETH_VMDQ_NUM_MIRROR_RULE - 1);
3108                 return (-EINVAL);
3109         }
3110
3111         dev = &rte_eth_devices[port_id];
3112         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
3113
3114         return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
3115 }
3116
3117 int
3118 rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
3119 {
3120         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3121
3122         if (!rte_eth_dev_is_valid_port(port_id)) {
3123                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3124                 return (-ENODEV);
3125         }
3126
3127         if(rule_id >= ETH_VMDQ_NUM_MIRROR_RULE)
3128         {
3129                 PMD_DEBUG_TRACE("Invalid rule_id, rule_id must be 0-%d\n",
3130                         ETH_VMDQ_NUM_MIRROR_RULE-1);
3131                 return (-EINVAL);
3132         }
3133
3134         dev = &rte_eth_devices[port_id];
3135         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
3136
3137         return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
3138 }
3139
3140 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3141 uint16_t
3142 rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
3143                  struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
3144 {
3145         struct rte_eth_dev *dev;
3146
3147         if (!rte_eth_dev_is_valid_port(port_id)) {
3148                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3149                 return 0;
3150         }
3151
3152         dev = &rte_eth_devices[port_id];
3153         FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);
3154         if (queue_id >= dev->data->nb_rx_queues) {
3155                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
3156                 return 0;
3157         }
3158         return (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
3159                                                 rx_pkts, nb_pkts);
3160 }
3161
3162 uint16_t
3163 rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
3164                  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
3165 {
3166         struct rte_eth_dev *dev;
3167
3168         if (!rte_eth_dev_is_valid_port(port_id)) {
3169                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3170                 return 0;
3171         }
3172
3173         dev = &rte_eth_devices[port_id];
3174
3175         FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);
3176         if (queue_id >= dev->data->nb_tx_queues) {
3177                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
3178                 return 0;
3179         }
3180         return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id],
3181                                                 tx_pkts, nb_pkts);
3182 }
3183
/*
 * Debug-build wrapper: forward to the PMD's rx_queue_count callback.
 * Returns 0 for an invalid port or when the PMD has no handler.
 */
uint32_t
rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
{
	struct rte_eth_dev *dev;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return 0;
	}

	dev = &rte_eth_devices[port_id];
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, 0);
	/* NOTE(review): queue_id is not range-checked here, unlike
	 * rte_eth_rx_burst() above — presumably left to the PMD
	 * callback; confirm against the PMD implementations. */
	return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
}
3198
3199 int
3200 rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
3201 {
3202         struct rte_eth_dev *dev;
3203
3204         if (!rte_eth_dev_is_valid_port(port_id)) {
3205                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3206                 return (-ENODEV);
3207         }
3208
3209         dev = &rte_eth_devices[port_id];
3210         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
3211         return (*dev->dev_ops->rx_descriptor_done)( \
3212                 dev->data->rx_queues[queue_id], offset);
3213 }
3214 #endif
3215
/*
 * Register a user callback for `event` on a port.  Registering the
 * same (cb_fn, cb_arg, event) triple twice is a no-op.
 *
 * Returns 0 on success, -EINVAL for a NULL cb_fn or invalid port,
 * -ENOMEM when allocating the callback record fails.
 */
int
rte_eth_dev_callback_register(uint8_t port_id,
			enum rte_eth_event_type event,
			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_callback *user_cb;

	if (!cb_fn)
		return (-EINVAL);

	if (!rte_eth_dev_is_valid_port(port_id)) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-EINVAL);
	}

	dev = &rte_eth_devices[port_id];
	rte_spinlock_lock(&rte_eth_dev_cb_lock);

	/* Look for an identical registration; user_cb is non-NULL (and
	 * the loop breaks) if one already exists. */
	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
		if (user_cb->cb_fn == cb_fn &&
			user_cb->cb_arg == cb_arg &&
			user_cb->event == event) {
			break;
		}
	}

	/* create a new callback. */
	if (user_cb == NULL && (user_cb = rte_zmalloc("INTR_USER_CALLBACK",
			sizeof(struct rte_eth_dev_callback), 0)) != NULL) {
		user_cb->cb_fn = cb_fn;
		user_cb->cb_arg = cb_arg;
		user_cb->event = event;
		TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
	}

	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
	/* user_cb is NULL only when rte_zmalloc() failed above. */
	return ((user_cb == NULL) ? -ENOMEM : 0);
}
3255
/*
 * Unregister user callback(s) for `event` on a port.  A cb_arg of
 * (void *)-1 acts as a wildcard matching any registered argument.
 *
 * Returns 0 when all matching callbacks were removed, -EAGAIN when at
 * least one matching callback was executing and could not be removed,
 * -EINVAL for a NULL cb_fn or invalid port.
 */
int
rte_eth_dev_callback_unregister(uint8_t port_id,
			enum rte_eth_event_type event,
			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_callback *cb, *next;

	if (!cb_fn)
		return (-EINVAL);

	if (!rte_eth_dev_is_valid_port(port_id)) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-EINVAL);
	}

	dev = &rte_eth_devices[port_id];
	rte_spinlock_lock(&rte_eth_dev_cb_lock);

	ret = 0;
	/* `next` is captured before a possible removal, so the walk
	 * survives deleting the current node. */
	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb->cb_arg != (void *)-1 &&
				cb->cb_arg != cb_arg))
			continue;

		/*
		 * if this callback is not executing right now,
		 * then remove it.
		 */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
			rte_free(cb);
		} else {
			ret = -EAGAIN;
		}
	}

	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
	return (ret);
}
3301
/*
 * Invoke every callback registered for `event` on `dev`.
 *
 * The lock is dropped around each invocation so a callback may call
 * back into the ethdev API; `active` is set while it runs so that
 * rte_eth_dev_callback_unregister() will not free it underneath us.
 * The callback is invoked from a stack copy (dev_cb) taken under the
 * lock.
 */
void
_rte_eth_dev_callback_process(struct rte_eth_dev *dev,
	enum rte_eth_event_type event)
{
	struct rte_eth_dev_callback *cb_lst;
	struct rte_eth_dev_callback dev_cb;

	rte_spinlock_lock(&rte_eth_dev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		rte_spinlock_unlock(&rte_eth_dev_cb_lock);
		dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
						dev_cb.cb_arg);
		rte_spinlock_lock(&rte_eth_dev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
}
3323 #ifdef RTE_NIC_BYPASS
3324 int rte_eth_dev_bypass_init(uint8_t port_id)
3325 {
3326         struct rte_eth_dev *dev;
3327
3328         if (!rte_eth_dev_is_valid_port(port_id)) {
3329                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3330                 return (-ENODEV);
3331         }
3332
3333         if ((dev= &rte_eth_devices[port_id]) == NULL) {
3334                 PMD_DEBUG_TRACE("Invalid port device\n");
3335                 return (-ENODEV);
3336         }
3337
3338         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP);
3339         (*dev->dev_ops->bypass_init)(dev);
3340         return 0;
3341 }
3342
3343 int
3344 rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state)
3345 {
3346         struct rte_eth_dev *dev;
3347
3348         if (!rte_eth_dev_is_valid_port(port_id)) {
3349                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3350                 return (-ENODEV);
3351         }
3352
3353         if ((dev= &rte_eth_devices[port_id]) == NULL) {
3354                 PMD_DEBUG_TRACE("Invalid port device\n");
3355                 return (-ENODEV);
3356         }
3357         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
3358         (*dev->dev_ops->bypass_state_show)(dev, state);
3359         return 0;
3360 }
3361
3362 int
3363 rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state)
3364 {
3365         struct rte_eth_dev *dev;
3366
3367         if (!rte_eth_dev_is_valid_port(port_id)) {
3368                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3369                 return (-ENODEV);
3370         }
3371
3372         if ((dev= &rte_eth_devices[port_id]) == NULL) {
3373                 PMD_DEBUG_TRACE("Invalid port device\n");
3374                 return (-ENODEV);
3375         }
3376
3377         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP);
3378         (*dev->dev_ops->bypass_state_set)(dev, new_state);
3379         return 0;
3380 }
3381
3382 int
3383 rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state)
3384 {
3385         struct rte_eth_dev *dev;
3386
3387         if (!rte_eth_dev_is_valid_port(port_id)) {
3388                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3389                 return (-ENODEV);
3390         }
3391
3392         if ((dev= &rte_eth_devices[port_id]) == NULL) {
3393                 PMD_DEBUG_TRACE("Invalid port device\n");
3394                 return (-ENODEV);
3395         }
3396
3397         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
3398         (*dev->dev_ops->bypass_event_show)(dev, event, state);
3399         return 0;
3400 }
3401
3402 int
3403 rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state)
3404 {
3405         struct rte_eth_dev *dev;
3406
3407         if (!rte_eth_dev_is_valid_port(port_id)) {
3408                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3409                 return (-ENODEV);
3410         }
3411
3412         if ((dev= &rte_eth_devices[port_id]) == NULL) {
3413                 PMD_DEBUG_TRACE("Invalid port device\n");
3414                 return (-ENODEV);
3415         }
3416
3417         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP);
3418         (*dev->dev_ops->bypass_event_set)(dev, event, state);
3419         return 0;
3420 }
3421
3422 int
3423 rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout)
3424 {
3425         struct rte_eth_dev *dev;
3426
3427         if (!rte_eth_dev_is_valid_port(port_id)) {
3428                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3429                 return (-ENODEV);
3430         }
3431
3432         if ((dev= &rte_eth_devices[port_id]) == NULL) {
3433                 PMD_DEBUG_TRACE("Invalid port device\n");
3434                 return (-ENODEV);
3435         }
3436
3437         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP);
3438         (*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout);
3439         return 0;
3440 }
3441
3442 int
3443 rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver)
3444 {
3445         struct rte_eth_dev *dev;
3446
3447         if (!rte_eth_dev_is_valid_port(port_id)) {
3448                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3449                 return (-ENODEV);
3450         }
3451
3452         if ((dev= &rte_eth_devices[port_id]) == NULL) {
3453                 PMD_DEBUG_TRACE("Invalid port device\n");
3454                 return (-ENODEV);
3455         }
3456
3457         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP);
3458         (*dev->dev_ops->bypass_ver_show)(dev, ver);
3459         return 0;
3460 }
3461
3462 int
3463 rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
3464 {
3465         struct rte_eth_dev *dev;
3466
3467         if (!rte_eth_dev_is_valid_port(port_id)) {
3468                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3469                 return (-ENODEV);
3470         }
3471
3472         if ((dev= &rte_eth_devices[port_id]) == NULL) {
3473                 PMD_DEBUG_TRACE("Invalid port device\n");
3474                 return (-ENODEV);
3475         }
3476
3477         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP);
3478         (*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout);
3479         return 0;
3480 }
3481
3482 int
3483 rte_eth_dev_bypass_wd_reset(uint8_t port_id)
3484 {
3485         struct rte_eth_dev *dev;
3486
3487         if (!rte_eth_dev_is_valid_port(port_id)) {
3488                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3489                 return (-ENODEV);
3490         }
3491
3492         if ((dev= &rte_eth_devices[port_id]) == NULL) {
3493                 PMD_DEBUG_TRACE("Invalid port device\n");
3494                 return (-ENODEV);
3495         }
3496
3497         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP);
3498         (*dev->dev_ops->bypass_wd_reset)(dev);
3499         return 0;
3500 }
3501 #endif
3502
3503 int
3504 rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type)
3505 {
3506         struct rte_eth_dev *dev;
3507
3508         if (!rte_eth_dev_is_valid_port(port_id)) {
3509                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3510                 return -ENODEV;
3511         }
3512
3513         dev = &rte_eth_devices[port_id];
3514         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3515         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3516                                 RTE_ETH_FILTER_NOP, NULL);
3517 }
3518
3519 int
3520 rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
3521                        enum rte_filter_op filter_op, void *arg)
3522 {
3523         struct rte_eth_dev *dev;
3524
3525         if (!rte_eth_dev_is_valid_port(port_id)) {
3526                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3527                 return -ENODEV;
3528         }
3529
3530         dev = &rte_eth_devices[port_id];
3531         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3532         return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op, arg);
3533 }
3534
3535 void *
3536 rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
3537                 rte_rxtx_callback_fn fn, void *user_param)
3538 {
3539 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3540         rte_errno = ENOTSUP;
3541         return NULL;
3542 #endif
3543         /* check input parameters */
3544         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3545                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3546                 rte_errno = EINVAL;
3547                 return NULL;
3548         }
3549
3550         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3551
3552         if (cb == NULL) {
3553                 rte_errno = ENOMEM;
3554                 return NULL;
3555         }
3556
3557         cb->fn = fn;
3558         cb->param = user_param;
3559         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3560         rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3561         return cb;
3562 }
3563
3564 void *
3565 rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
3566                 rte_rxtx_callback_fn fn, void *user_param)
3567 {
3568 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3569         rte_errno = ENOTSUP;
3570         return NULL;
3571 #endif
3572         /* check input parameters */
3573         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3574                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
3575                 rte_errno = EINVAL;
3576                 return NULL;
3577         }
3578
3579         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3580
3581         if (cb == NULL) {
3582                 rte_errno = ENOMEM;
3583                 return NULL;
3584         }
3585
3586         cb->fn = fn;
3587         cb->param = user_param;
3588         cb->next = rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
3589         rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
3590         return cb;
3591 }
3592
3593 int
3594 rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
3595                 struct rte_eth_rxtx_callback *user_cb)
3596 {
3597 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3598         return (-ENOTSUP);
3599 #endif
3600         /* Check input parameters. */
3601         if (!rte_eth_dev_is_valid_port(port_id) || user_cb == NULL ||
3602                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3603                 return (-EINVAL);
3604         }
3605
3606         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3607         struct rte_eth_rxtx_callback *cb = dev->post_rx_burst_cbs[queue_id];
3608         struct rte_eth_rxtx_callback *prev_cb;
3609
3610         /* Reset head pointer and remove user cb if first in the list. */
3611         if (cb == user_cb) {
3612                 dev->post_rx_burst_cbs[queue_id] = user_cb->next;
3613                 return 0;
3614         }
3615
3616         /* Remove the user cb from the callback list. */
3617         do {
3618                 prev_cb = cb;
3619                 cb = cb->next;
3620
3621                 if (cb == user_cb) {
3622                         prev_cb->next = user_cb->next;
3623                         return 0;
3624                 }
3625
3626         } while (cb != NULL);
3627
3628         /* Callback wasn't found. */
3629         return (-EINVAL);
3630 }
3631
3632 int
3633 rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
3634                 struct rte_eth_rxtx_callback *user_cb)
3635 {
3636 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3637         return (-ENOTSUP);
3638 #endif
3639         /* Check input parameters. */
3640         if (!rte_eth_dev_is_valid_port(port_id) || user_cb == NULL ||
3641                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
3642                 return (-EINVAL);
3643         }
3644
3645         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3646         struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];
3647         struct rte_eth_rxtx_callback *prev_cb;
3648
3649         /* Reset head pointer and remove user cb if first in the list. */
3650         if (cb == user_cb) {
3651                 dev->pre_tx_burst_cbs[queue_id] = user_cb->next;
3652                 return 0;
3653         }
3654
3655         /* Remove the user cb from the callback list. */
3656         do {
3657                 prev_cb = cb;
3658                 cb = cb->next;
3659
3660                 if (cb == user_cb) {
3661                         prev_cb->next = user_cb->next;
3662                         return 0;
3663                 }
3664
3665         } while (cb != NULL);
3666
3667         /* Callback wasn't found. */
3668         return (-EINVAL);
3669 }