ethdev: add device type
[dpdk.git] / lib / librte_ether / rte_ethdev.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>

#include "rte_ether.h"
#include "rte_ethdev.h"

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
#define PMD_DEBUG_TRACE(fmt, args...) do {                        \
                RTE_LOG(ERR, PMD, "%s: " fmt, __func__, ## args); \
        } while (0)
#else
#define PMD_DEBUG_TRACE(fmt, args...)
#endif

/* Macros to restrict the following functions to the primary process only */
#define PROC_PRIMARY_OR_ERR_RET(retval) do { \
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
                PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
                return (retval); \
        } \
} while (0)
#define PROC_PRIMARY_OR_RET() do { \
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
                PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
                return; \
        } \
} while (0)

/* Macros to check for invalid function pointers in a dev_ops structure */
#define FUNC_PTR_OR_ERR_RET(func, retval) do { \
        if ((func) == NULL) { \
                PMD_DEBUG_TRACE("Function not supported\n"); \
                return (retval); \
        } \
} while (0)
#define FUNC_PTR_OR_RET(func) do { \
        if ((func) == NULL) { \
                PMD_DEBUG_TRACE("Function not supported\n"); \
                return; \
        } \
} while (0)

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
static struct rte_eth_dev_data *rte_eth_dev_data = NULL;
static uint8_t nb_ports = 0;

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

static struct rte_eth_xstats_name_off rte_stats_strings[] = {
        {"rx_packets", offsetof(struct rte_eth_stats, ipackets)},
        {"tx_packets", offsetof(struct rte_eth_stats, opackets)},
        {"rx_bytes", offsetof(struct rte_eth_stats, ibytes)},
        {"tx_bytes", offsetof(struct rte_eth_stats, obytes)},
        {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
        {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
        {"rx_crc_errors", offsetof(struct rte_eth_stats, ibadcrc)},
        {"rx_bad_length_errors", offsetof(struct rte_eth_stats, ibadlen)},
        {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
        {"alloc_rx_buff_failed", offsetof(struct rte_eth_stats, rx_nombuf)},
        {"fdir_match", offsetof(struct rte_eth_stats, fdirmatch)},
        {"fdir_miss", offsetof(struct rte_eth_stats, fdirmiss)},
        {"tx_flow_control_xon", offsetof(struct rte_eth_stats, tx_pause_xon)},
        {"rx_flow_control_xon", offsetof(struct rte_eth_stats, rx_pause_xon)},
        {"tx_flow_control_xoff", offsetof(struct rte_eth_stats, tx_pause_xoff)},
        {"rx_flow_control_xoff", offsetof(struct rte_eth_stats, rx_pause_xoff)},
};
#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))

static struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
        {"rx_packets", offsetof(struct rte_eth_stats, q_ipackets)},
        {"rx_bytes", offsetof(struct rte_eth_stats, q_ibytes)},
};
#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
                sizeof(rte_rxq_stats_strings[0]))

static struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
        {"tx_packets", offsetof(struct rte_eth_stats, q_opackets)},
        {"tx_bytes", offsetof(struct rte_eth_stats, q_obytes)},
        {"tx_errors", offsetof(struct rte_eth_stats, q_errors)},
};
#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
                sizeof(rte_txq_stats_strings[0]))


/**
 * The user application callback description.
 *
 * It contains the callback address to be registered by the user application,
 * a pointer to the parameters for the callback, and the event type.
 */
struct rte_eth_dev_callback {
        TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
        rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
        void *cb_arg;                           /**< Parameter for callback */
        enum rte_eth_event_type event;          /**< Interrupt event type */
        uint32_t active;                        /**< Callback is executing */
};

enum {
        STAT_QMAP_TX = 0,
        STAT_QMAP_RX
};

enum {
        DEV_DETACHED = 0,
        DEV_ATTACHED
};

static inline void
rte_eth_dev_data_alloc(void)
{
        const unsigned flags = 0;
        const struct rte_memzone *mz;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
                                RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
                                rte_socket_id(), flags);
        } else
                mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
        if (mz == NULL)
                rte_panic("Cannot allocate memzone for ethernet port data\n");

        rte_eth_dev_data = mz->addr;
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                memset(rte_eth_dev_data, 0,
                                RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
}

static struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if ((rte_eth_devices[i].attached == DEV_ATTACHED) &&
                    strcmp(rte_eth_devices[i].data->name, name) == 0)
                        return &rte_eth_devices[i];
        }
        return NULL;
}

static uint8_t
rte_eth_dev_find_free_port(void)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (rte_eth_devices[i].attached == DEV_DETACHED)
                        return i;
        }
        return RTE_MAX_ETHPORTS;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name, enum rte_eth_dev_type type)
{
        uint8_t port_id;
        struct rte_eth_dev *eth_dev;

        port_id = rte_eth_dev_find_free_port();
        if (port_id == RTE_MAX_ETHPORTS) {
                PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
                return NULL;
        }

        if (rte_eth_dev_data == NULL)
                rte_eth_dev_data_alloc();

        if (rte_eth_dev_allocated(name) != NULL) {
                PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n",
                                name);
                return NULL;
        }

        eth_dev = &rte_eth_devices[port_id];
        eth_dev->data = &rte_eth_dev_data[port_id];
        snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
        eth_dev->data->port_id = port_id;
        eth_dev->attached = DEV_ATTACHED;
        eth_dev->dev_type = type;
        nb_ports++;
        return eth_dev;
}
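
/*
 * Example (illustrative sketch, not part of this file): a virtual PMD's
 * init path could allocate its port as below. The device name is made up,
 * and RTE_ETH_DEV_VIRTUAL is assumed to be the non-PCI value of the new
 * dev_type enum added by this patch.
 *
 *        struct rte_eth_dev *eth_dev;
 *
 *        eth_dev = rte_eth_dev_allocate("eth_ring0", RTE_ETH_DEV_VIRTUAL);
 *        if (eth_dev == NULL)
 *                return -ENOMEM;  (no free port, or name already in use)
 */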

static inline int
rte_eth_dev_create_unique_device_name(char *name, size_t size,
                struct rte_pci_device *pci_dev)
{
        int ret;

        if ((name == NULL) || (pci_dev == NULL))
                return -EINVAL;

        ret = snprintf(name, size, "%d:%d.%d",
                        pci_dev->addr.bus, pci_dev->addr.devid,
                        pci_dev->addr.function);
        if (ret < 0)
                return ret;

        return 0;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
        if (eth_dev == NULL)
                return -EINVAL;

        eth_dev->attached = DEV_DETACHED;
        nb_ports--;
        return 0;
}

static int
rte_eth_dev_init(struct rte_pci_driver *pci_drv,
                 struct rte_pci_device *pci_dev)
{
        struct eth_driver *eth_drv;
        struct rte_eth_dev *eth_dev;
        char ethdev_name[RTE_ETH_NAME_MAX_LEN];

        int diag;

        eth_drv = (struct eth_driver *)pci_drv;

        /* Create unique Ethernet device name using PCI address */
        rte_eth_dev_create_unique_device_name(ethdev_name,
                        sizeof(ethdev_name), pci_dev);

        eth_dev = rte_eth_dev_allocate(ethdev_name, RTE_ETH_DEV_PCI);
        if (eth_dev == NULL)
                return -ENOMEM;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
                                  eth_drv->dev_private_size,
                                  RTE_CACHE_LINE_SIZE);
                if (eth_dev->data->dev_private == NULL)
                        rte_panic("Cannot allocate memory for private port data\n");
        }
        eth_dev->pci_dev = pci_dev;
        eth_dev->driver = eth_drv;
        eth_dev->data->rx_mbuf_alloc_failed = 0;

        /* init user callbacks */
        TAILQ_INIT(&(eth_dev->link_intr_cbs));

        /* Set the default MTU. */
        eth_dev->data->mtu = ETHER_MTU;

        /* Invoke PMD device initialization function */
        diag = (*eth_drv->eth_dev_init)(eth_drv, eth_dev);
        if (diag == 0)
                return 0;

        PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%x device_id=0x%x)"
                        " failed\n", pci_drv->name,
                        (unsigned) pci_dev->id.vendor_id,
                        (unsigned) pci_dev->id.device_id);
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                rte_free(eth_dev->data->dev_private);
        eth_dev->attached = DEV_DETACHED;
        nb_ports--;
        return diag;
}

static int
rte_eth_dev_uninit(struct rte_pci_device *pci_dev)
{
        const struct eth_driver *eth_drv;
        struct rte_eth_dev *eth_dev;
        char ethdev_name[RTE_ETH_NAME_MAX_LEN];
        int ret;

        if (pci_dev == NULL)
                return -EINVAL;

        /* Create unique Ethernet device name using PCI address */
        rte_eth_dev_create_unique_device_name(ethdev_name,
                        sizeof(ethdev_name), pci_dev);

        eth_dev = rte_eth_dev_allocated(ethdev_name);
        if (eth_dev == NULL)
                return -ENODEV;

        eth_drv = (const struct eth_driver *)pci_dev->driver;

        /* Invoke PMD device uninit function */
        if (*eth_drv->eth_dev_uninit) {
                ret = (*eth_drv->eth_dev_uninit)(eth_drv, eth_dev);
                if (ret)
                        return ret;
        }

        /* free ether device */
        rte_eth_dev_release_port(eth_dev);

        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                rte_free(eth_dev->data->dev_private);

        eth_dev->pci_dev = NULL;
        eth_dev->driver = NULL;
        eth_dev->data = NULL;

        return 0;
}

/**
 * Register an Ethernet [Poll Mode] driver.
 *
 * Function invoked by the initialization function of an Ethernet driver
 * to simultaneously register itself as a PCI driver and as an Ethernet
 * Poll Mode Driver.
 * Invokes the rte_eal_pci_register() function to register the *pci_drv*
 * structure embedded in the *eth_drv* structure, after having stored the
 * address of the rte_eth_dev_init() function in the *devinit* field of
 * the *pci_drv* structure.
 * During the PCI probing phase, the rte_eth_dev_init() function is
 * invoked for each PCI [Ethernet device] matching the embedded PCI
 * identifiers provided by the driver.
 */
void
rte_eth_driver_register(struct eth_driver *eth_drv)
{
        eth_drv->pci_drv.devinit = rte_eth_dev_init;
        eth_drv->pci_drv.devuninit = rte_eth_dev_uninit;
        rte_eal_pci_register(&eth_drv->pci_drv);
}
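
/*
 * Example (illustrative sketch): a PMD typically wraps its rte_pci_driver
 * in an eth_driver and registers it from its own init hook. All of the
 * "foo" names below are hypothetical; the eth_driver fields match the ones
 * used by rte_eth_dev_init() above.
 *
 *        static struct eth_driver rte_foo_pmd = {
 *                .pci_drv = {
 *                        .name = "rte_foo_pmd",
 *                        .id_table = pci_id_foo_map,
 *                        .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
 *                },
 *                .eth_dev_init = eth_foo_dev_init,
 *                .dev_private_size = sizeof(struct foo_adapter),
 *        };
 *
 *        rte_eth_driver_register(&rte_foo_pmd);
 */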

static int
rte_eth_dev_is_valid_port(uint8_t port_id)
{
        if (port_id >= RTE_MAX_ETHPORTS ||
            rte_eth_devices[port_id].attached != DEV_ATTACHED)
                return 0;
        else
                return 1;
}

int
rte_eth_dev_socket_id(uint8_t port_id)
{
        if (!rte_eth_dev_is_valid_port(port_id))
                return -1;
        return rte_eth_devices[port_id].pci_dev->numa_node;
}

uint8_t
rte_eth_dev_count(void)
{
        return nb_ports;
}
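
/*
 * Example (illustrative sketch): applications commonly size their init
 * loops with rte_eth_dev_count() and place per-port resources on the
 * port's NUMA node. Note that once devices can be detached, port ids may
 * become sparse, so a per-id validity check is the safer long-term idiom.
 *
 *        uint8_t port;
 *
 *        for (port = 0; port < rte_eth_dev_count(); port++)
 *                printf("port %u is on socket %d\n", port,
 *                        rte_eth_dev_socket_id(port));
 */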

static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_rx_queues;
        void **rxq;
        unsigned i;

        if (dev->data->rx_queues == NULL) { /* first time configuration */
                dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
                                sizeof(dev->data->rx_queues[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (dev->data->rx_queues == NULL) {
                        dev->data->nb_rx_queues = 0;
                        return -ENOMEM;
                }
#ifdef RTE_ETHDEV_RXTX_CALLBACKS
                dev->post_rx_burst_cbs = rte_zmalloc(
                        "ethdev->post_rx_burst_cbs",
                        sizeof(*dev->post_rx_burst_cbs) * nb_queues,
                        RTE_CACHE_LINE_SIZE);
                if (dev->post_rx_burst_cbs == NULL) {
                        rte_free(dev->data->rx_queues);
                        dev->data->rx_queues = NULL;
                        dev->data->nb_rx_queues = 0;
                        return -ENOMEM;
                }
#endif

        } else { /* re-configure */
                FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);
                rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (rxq == NULL)
                        return -ENOMEM;
#ifdef RTE_ETHDEV_RXTX_CALLBACKS
                dev->post_rx_burst_cbs = rte_realloc(
                        dev->post_rx_burst_cbs,
                        sizeof(*dev->post_rx_burst_cbs) *
                                nb_queues, RTE_CACHE_LINE_SIZE);
                if (dev->post_rx_burst_cbs == NULL)
                        return -ENOMEM;
#endif
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;
                        memset(rxq + old_nb_queues, 0,
                                sizeof(rxq[0]) * new_qs);
#ifdef RTE_ETHDEV_RXTX_CALLBACKS
                        memset(dev->post_rx_burst_cbs + old_nb_queues, 0,
                                sizeof(dev->post_rx_burst_cbs[0]) * new_qs);
#endif
                }

                dev->data->rx_queues = rxq;

        }
        dev->data->nb_rx_queues = nb_queues;
        return 0;
}

int
rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        if (!rte_eth_dev_is_valid_port(port_id)) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return -EINVAL;
        }

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
                return -EINVAL;
        }

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

        return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
}

int
rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        if (!rte_eth_dev_is_valid_port(port_id)) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return -EINVAL;
        }

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
                return -EINVAL;
        }

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

        return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
}

int
rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        if (!rte_eth_dev_is_valid_port(port_id)) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return -EINVAL;
        }

        dev = &rte_eth_devices[port_id];
        if (tx_queue_id >= dev->data->nb_tx_queues) {
                PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
                return -EINVAL;
        }

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

        return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
}

int
rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        if (!rte_eth_dev_is_valid_port(port_id)) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return -EINVAL;
        }

        dev = &rte_eth_devices[port_id];
        if (tx_queue_id >= dev->data->nb_tx_queues) {
                PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
                return -EINVAL;
        }

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

        return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
}

static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_tx_queues;
        void **txq;
        unsigned i;

        if (dev->data->tx_queues == NULL) { /* first time configuration */
                dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
                                sizeof(dev->data->tx_queues[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (dev->data->tx_queues == NULL) {
                        dev->data->nb_tx_queues = 0;
                        return -ENOMEM;
                }
#ifdef RTE_ETHDEV_RXTX_CALLBACKS
                dev->pre_tx_burst_cbs = rte_zmalloc(
                        "ethdev->pre_tx_burst_cbs",
                        sizeof(*dev->pre_tx_burst_cbs) * nb_queues,
                        RTE_CACHE_LINE_SIZE);
                if (dev->pre_tx_burst_cbs == NULL) {
                        rte_free(dev->data->tx_queues);
                        dev->data->tx_queues = NULL;
                        dev->data->nb_tx_queues = 0;
                        return -ENOMEM;
                }
#endif

        } else { /* re-configure */
                FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);
                txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (txq == NULL)
                        return -ENOMEM;
#ifdef RTE_ETHDEV_RXTX_CALLBACKS
                dev->pre_tx_burst_cbs = rte_realloc(
                        dev->pre_tx_burst_cbs,
                        sizeof(*dev->pre_tx_burst_cbs) *
                                nb_queues, RTE_CACHE_LINE_SIZE);
                if (dev->pre_tx_burst_cbs == NULL)
                        return -ENOMEM;
#endif
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;
                        memset(txq + old_nb_queues, 0,
                                sizeof(txq[0]) * new_qs);
#ifdef RTE_ETHDEV_RXTX_CALLBACKS
                        memset(dev->pre_tx_burst_cbs + old_nb_queues, 0,
                                sizeof(dev->pre_tx_burst_cbs[0]) * new_qs);
#endif
                }

                dev->data->tx_queues = txq;

        }
        dev->data->nb_tx_queues = nb_queues;
        return 0;
}

static int
rte_eth_dev_check_vf_rss_rxq_num(uint8_t port_id, uint16_t nb_rx_q)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];

        switch (nb_rx_q) {
        case 1:
        case 2:
                RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
                break;
        case 4:
                RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
                break;
        default:
                return -EINVAL;
        }

        RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q;
        RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
                dev->pci_dev->max_vfs * nb_rx_q;

        return 0;
}

static int
rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                      const struct rte_eth_conf *dev_conf)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];

        if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
                /* check multi-queue mode */
                if ((dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) ||
                    (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB_RSS) ||
                    (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB)) {
                        /* SRIOV only works in VMDq enabled mode */
                        PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
                                        " SRIOV active, "
                                        "wrong VMDQ mq_mode rx %u tx %u\n",
                                        port_id,
                                        dev_conf->rxmode.mq_mode,
                                        dev_conf->txmode.mq_mode);
                        return -EINVAL;
                }

                switch (dev_conf->rxmode.mq_mode) {
                case ETH_MQ_RX_VMDQ_DCB:
                case ETH_MQ_RX_VMDQ_DCB_RSS:
                        /* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
                        PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
                                        " SRIOV active, "
                                        "unsupported VMDQ mq_mode rx %u\n",
                                        port_id, dev_conf->rxmode.mq_mode);
                        return -EINVAL;
                case ETH_MQ_RX_RSS:
                        PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
                                        " SRIOV active, "
                                        "Rx mq mode is changed from "
                                        "mq_mode %u into VMDQ mq_mode %u\n",
                                        port_id,
                                        dev_conf->rxmode.mq_mode,
                                        dev->data->dev_conf.rxmode.mq_mode);
                        /* fall through to the VMDQ RSS case */
                case ETH_MQ_RX_VMDQ_RSS:
                        dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
                        if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
                                if (rte_eth_dev_check_vf_rss_rxq_num(port_id, nb_rx_q) != 0) {
                                        PMD_DEBUG_TRACE("ethdev port_id=%d"
                                                " SRIOV active, invalid queue"
                                                " number for VMDQ RSS, allowed"
                                                " values are 1, 2 or 4\n",
                                                port_id);
                                        return -EINVAL;
                                }
                        break;
                default: /* ETH_MQ_RX_VMDQ_ONLY or ETH_MQ_RX_NONE */
                        /* if no mq mode was configured, use the default scheme */
                        dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
                        if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
                                RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
                        break;
                }

                switch (dev_conf->txmode.mq_mode) {
                case ETH_MQ_TX_VMDQ_DCB:
                        /* DCB VMDQ in SRIOV mode, not implemented yet */
                        PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
                                        " SRIOV active, "
                                        "unsupported VMDQ mq_mode tx %u\n",
                                        port_id, dev_conf->txmode.mq_mode);
                        return -EINVAL;
                default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
                        /* if no mq mode was configured, use the default scheme */
                        dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
                        break;
                }

                /* check valid queue number */
                if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
                    (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
                        PMD_DEBUG_TRACE("ethdev port_id=%d SRIOV active, "
                                        "queue number must be less than or equal to %d\n",
                                        port_id, RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
                        return -EINVAL;
                }
        } else {
                /* For VMDQ+DCB mode check our configuration before we go further */
                if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
                        const struct rte_eth_vmdq_dcb_conf *conf;

                        if (nb_rx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_rx_q "
                                                "!= %d\n",
                                                port_id, ETH_VMDQ_DCB_NUM_QUEUES);
                                return -EINVAL;
                        }
                        conf = &(dev_conf->rx_adv_conf.vmdq_dcb_conf);
                        if (!(conf->nb_queue_pools == ETH_16_POOLS ||
                              conf->nb_queue_pools == ETH_32_POOLS)) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
                                                "nb_queue_pools must be %d or %d\n",
                                                port_id, ETH_16_POOLS, ETH_32_POOLS);
                                return -EINVAL;
                        }
                }
                if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
                        const struct rte_eth_vmdq_dcb_tx_conf *conf;

                        if (nb_tx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_tx_q "
                                                "!= %d\n",
                                                port_id, ETH_VMDQ_DCB_NUM_QUEUES);
                                return -EINVAL;
                        }
                        conf = &(dev_conf->tx_adv_conf.vmdq_dcb_tx_conf);
                        if (!(conf->nb_queue_pools == ETH_16_POOLS ||
                              conf->nb_queue_pools == ETH_32_POOLS)) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
                                                "nb_queue_pools must be %d or %d\n",
                                                port_id, ETH_16_POOLS, ETH_32_POOLS);
                                return -EINVAL;
                        }
                }

                /* For DCB mode check our configuration before we go further */
                if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
                        const struct rte_eth_dcb_rx_conf *conf;

                        if (nb_rx_q != ETH_DCB_NUM_QUEUES) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_rx_q "
                                                "!= %d\n",
                                                port_id, ETH_DCB_NUM_QUEUES);
                                return -EINVAL;
                        }
                        conf = &(dev_conf->rx_adv_conf.dcb_rx_conf);
                        if (!(conf->nb_tcs == ETH_4_TCS ||
                              conf->nb_tcs == ETH_8_TCS)) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
                                                "nb_tcs must be %d or %d\n",
                                                port_id, ETH_4_TCS, ETH_8_TCS);
                                return -EINVAL;
                        }
                }

                if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
                        const struct rte_eth_dcb_tx_conf *conf;

                        if (nb_tx_q != ETH_DCB_NUM_QUEUES) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_tx_q "
                                                "!= %d\n",
                                                port_id, ETH_DCB_NUM_QUEUES);
                                return -EINVAL;
                        }
                        conf = &(dev_conf->tx_adv_conf.dcb_tx_conf);
                        if (!(conf->nb_tcs == ETH_4_TCS ||
                              conf->nb_tcs == ETH_8_TCS)) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
                                                "nb_tcs must be %d or %d\n",
                                                port_id, ETH_4_TCS, ETH_8_TCS);
                                return -EINVAL;
                        }
                }
        }
        return 0;
}

int
rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                      const struct rte_eth_conf *dev_conf)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        int diag;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        if (!rte_eth_dev_is_valid_port(port_id)) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return -EINVAL;
        }

        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

        if (dev->data->dev_started) {
                PMD_DEBUG_TRACE(
                    "port %d must be stopped to allow configuration\n", port_id);
                return -EBUSY;
        }

        /*
         * Check that the numbers of RX and TX queues are not greater
         * than the maximum number of RX and TX queues supported by the
         * configured device.
         */
        (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
        if (nb_rx_q > dev_info.max_rx_queues) {
                PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
                                port_id, nb_rx_q, dev_info.max_rx_queues);
                return -EINVAL;
        }
        if (nb_rx_q == 0) {
                PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_q == 0\n", port_id);
                return -EINVAL;
        }

        if (nb_tx_q > dev_info.max_tx_queues) {
                PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
                                port_id, nb_tx_q, dev_info.max_tx_queues);
                return -EINVAL;
        }
        if (nb_tx_q == 0) {
                PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_q == 0\n", port_id);
                return -EINVAL;
        }

        /* Copy the dev_conf parameter into the dev structure */
        memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

        /*
         * If link state interrupt is enabled, check that the
         * device supports it.
         */
        if (dev_conf->intr_conf.lsc == 1) {
                const struct rte_pci_driver *pci_drv = &dev->driver->pci_drv;

                if (!(pci_drv->drv_flags & RTE_PCI_DRV_INTR_LSC)) {
                        PMD_DEBUG_TRACE("driver %s does not support lsc\n",
                                        pci_drv->name);
                        return -EINVAL;
                }
        }

        /*
         * If jumbo frames are enabled, check that the maximum RX packet
         * length is supported by the configured device.
         */
        if (dev_conf->rxmode.jumbo_frame == 1) {
                if (dev_conf->rxmode.max_rx_pkt_len >
                    dev_info.max_rx_pktlen) {
                        PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
                                " > max valid value %u\n",
                                port_id,
                                (unsigned)dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned)dev_info.max_rx_pktlen);
                        return -EINVAL;
                } else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
                        PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
                                " < min valid value %u\n",
                                port_id,
                                (unsigned)dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned)ETHER_MIN_LEN);
                        return -EINVAL;
                }
        } else {
                if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
                        dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
                        /* Use default value */
                        dev->data->dev_conf.rxmode.max_rx_pkt_len =
                                                        ETHER_MAX_LEN;
        }

        /* multi-queue mode checking */
        diag = rte_eth_dev_check_mq_mode(port_id, nb_rx_q, nb_tx_q, dev_conf);
        if (diag != 0) {
                PMD_DEBUG_TRACE("port%d rte_eth_dev_check_mq_mode = %d\n",
                                port_id, diag);
                return diag;
        }

        /*
         * Setup new number of RX/TX queues and reconfigure device.
         */
        diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
        if (diag != 0) {
                PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
                                port_id, diag);
                return diag;
        }

        diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
        if (diag != 0) {
                PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
                                port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                return diag;
        }

        diag = (*dev->dev_ops->dev_configure)(dev);
        if (diag != 0) {
                PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
                                port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                rte_eth_dev_tx_queue_config(dev, 0);
                return diag;
        }

        return 0;
}
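
/*
 * Example (illustrative sketch): a minimal single-queue configuration;
 * the rte_eth_conf initializer shown here is hypothetical.
 *
 *        static const struct rte_eth_conf port_conf = {
 *                .rxmode = { .mq_mode = ETH_MQ_RX_NONE, },
 *        };
 *        int ret;
 *
 *        ret = rte_eth_dev_configure(port, 1, 1, &port_conf);
 *        if (ret < 0)
 *                rte_exit(EXIT_FAILURE, "cannot configure port %u: %d\n",
 *                        port, ret);
 */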

static void
rte_eth_dev_config_restore(uint8_t port_id)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        struct ether_addr addr;
        uint16_t i;
        uint32_t pool = 0;

        dev = &rte_eth_devices[port_id];

        rte_eth_dev_info_get(port_id, &dev_info);

        if (RTE_ETH_DEV_SRIOV(dev).active)
                pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx;

        /* replay MAC address configuration */
        for (i = 0; i < dev_info.max_mac_addrs; i++) {
                addr = dev->data->mac_addrs[i];

                /* skip zero address */
                if (is_zero_ether_addr(&addr))
                        continue;

                /* add address to the hardware */
                if (*dev->dev_ops->mac_addr_add &&
                        (dev->data->mac_pool_sel[i] & (1ULL << pool)))
                        (*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool);
                else {
                        PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
                                        port_id);
                        /* exit the loop but do not return an error */
                        break;
                }
        }

        /* replay promiscuous configuration */
        if (rte_eth_promiscuous_get(port_id) == 1)
                rte_eth_promiscuous_enable(port_id);
        else if (rte_eth_promiscuous_get(port_id) == 0)
                rte_eth_promiscuous_disable(port_id);

        /* replay allmulticast configuration */
        if (rte_eth_allmulticast_get(port_id) == 1)
                rte_eth_allmulticast_enable(port_id);
        else if (rte_eth_allmulticast_get(port_id) == 0)
                rte_eth_allmulticast_disable(port_id);
}

int
rte_eth_dev_start(uint8_t port_id)
{
        struct rte_eth_dev *dev;
        int diag;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        if (!rte_eth_dev_is_valid_port(port_id)) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return -EINVAL;
        }

        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

        if (dev->data->dev_started != 0) {
                PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
                        " already started\n",
                        port_id);
                return 0;
        }

        diag = (*dev->dev_ops->dev_start)(dev);
        if (diag == 0)
                dev->data->dev_started = 1;
        else
                return diag;

        rte_eth_dev_config_restore(port_id);

        if (dev->data->dev_conf.intr_conf.lsc != 0) {
                FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
                (*dev->dev_ops->link_update)(dev, 0);
        }
        return 0;
}
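
/*
 * Example (illustrative sketch): starting the port is the final step of
 * initialization, after rte_eth_dev_configure() and the per-queue setup
 * calls have all succeeded.
 *
 *        int ret = rte_eth_dev_start(port);
 *        if (ret < 0)
 *                rte_exit(EXIT_FAILURE, "rte_eth_dev_start(%u) failed: %d\n",
 *                        port, ret);
 */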

void
rte_eth_dev_stop(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_RET();

        if (!rte_eth_dev_is_valid_port(port_id)) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return;
        }

        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

        if (dev->data->dev_started == 0) {
                PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
                        " already stopped\n",
                        port_id);
                return;
        }

        dev->data->dev_started = 0;
        (*dev->dev_ops->dev_stop)(dev);
}

int
rte_eth_dev_set_link_up(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        if (!rte_eth_dev_is_valid_port(port_id)) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return -EINVAL;
        }

        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
        return (*dev->dev_ops->dev_set_link_up)(dev);
}

int
rte_eth_dev_set_link_down(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        if (!rte_eth_dev_is_valid_port(port_id)) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return -EINVAL;
        }

        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
        return (*dev->dev_ops->dev_set_link_down)(dev);
}

void
rte_eth_dev_close(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_RET();

        if (!rte_eth_dev_is_valid_port(port_id)) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return;
        }

        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
        dev->data->dev_started = 0;
        (*dev->dev_ops->dev_close)(dev);
}

int
rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
                       uint16_t nb_rx_desc, unsigned int socket_id,
                       const struct rte_eth_rxconf *rx_conf,
                       struct rte_mempool *mp)
{
        int ret;
        uint32_t mbp_buf_size;
        struct rte_eth_dev *dev;
        struct rte_pktmbuf_pool_private *mbp_priv;
        struct rte_eth_dev_info dev_info;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        if (!rte_eth_dev_is_valid_port(port_id)) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return -EINVAL;
        }

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
                return -EINVAL;
        }

        if (dev->data->dev_started) {
                PMD_DEBUG_TRACE(
                    "port %d must be stopped to allow configuration\n", port_id);
                return -EBUSY;
        }

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);

        /*
         * Check the size of the mbuf data buffer.
         * This value must be provided in the private data of the memory pool.
         * First check that the memory pool has valid private data.
         */
        rte_eth_dev_info_get(port_id, &dev_info);
        if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
                PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
                                mp->name, (int) mp->private_data_size,
                                (int) sizeof(struct rte_pktmbuf_pool_private));
                return -ENOSPC;
        }
        mbp_priv = rte_mempool_get_priv(mp);
        mbp_buf_size = mbp_priv->mbuf_data_room_size;

        if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
                PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
                                "(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
                                "=%d)\n",
                                mp->name,
                                (int)mbp_buf_size,
                                (int)(RTE_PKTMBUF_HEADROOM +
                                      dev_info.min_rx_bufsize),
                                (int)RTE_PKTMBUF_HEADROOM,
                                (int)dev_info.min_rx_bufsize);
                return -EINVAL;
        }

        if (rx_conf == NULL)
                rx_conf = &dev_info.default_rxconf;

        ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
                                              socket_id, rx_conf, mp);
        if (!ret) {
                if (!dev->data->min_rx_buf_size ||
                    dev->data->min_rx_buf_size > mbp_buf_size)
                        dev->data->min_rx_buf_size = mbp_buf_size;
        }

        return ret;
}
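
/*
 * Example (illustrative sketch): one RX queue on queue id 0, backed by an
 * already-created mbuf pool; the descriptor count is hypothetical. Passing
 * a NULL rx_conf selects the driver's dev_info.default_rxconf, as above.
 *
 *        ret = rte_eth_rx_queue_setup(port, 0, 128,
 *                        rte_eth_dev_socket_id(port), NULL, mbuf_pool);
 *        if (ret < 0)
 *                rte_exit(EXIT_FAILURE, "rx queue setup failed: %d\n", ret);
 */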

int
rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
                       uint16_t nb_tx_desc, unsigned int socket_id,
                       const struct rte_eth_txconf *tx_conf)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        if (!rte_eth_dev_is_valid_port(port_id)) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return -EINVAL;
        }

        dev = &rte_eth_devices[port_id];
        if (tx_queue_id >= dev->data->nb_tx_queues) {
                PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
                return -EINVAL;
        }

        if (dev->data->dev_started) {
                PMD_DEBUG_TRACE(
                    "port %d must be stopped to allow configuration\n", port_id);
                return -EBUSY;
        }

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);

        rte_eth_dev_info_get(port_id, &dev_info);

        if (tx_conf == NULL)
                tx_conf = &dev_info.default_txconf;

        return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
                                               socket_id, tx_conf);
}
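
/*
 * Example (illustrative sketch): the matching TX queue; as on the RX side,
 * a NULL tx_conf selects the driver's dev_info.default_txconf.
 *
 *        ret = rte_eth_tx_queue_setup(port, 0, 512,
 *                        rte_eth_dev_socket_id(port), NULL);
 *        if (ret < 0)
 *                rte_exit(EXIT_FAILURE, "tx queue setup failed: %d\n", ret);
 */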

void
rte_eth_promiscuous_enable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        if (!rte_eth_dev_is_valid_port(port_id)) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return;
        }

        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
        (*dev->dev_ops->promiscuous_enable)(dev);
        dev->data->promiscuous = 1;
}

void
rte_eth_promiscuous_disable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        if (!rte_eth_dev_is_valid_port(port_id)) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return;
        }

        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
        dev->data->promiscuous = 0;
        (*dev->dev_ops->promiscuous_disable)(dev);
}

int
rte_eth_promiscuous_get(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        if (!rte_eth_dev_is_valid_port(port_id)) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return -1;
        }

        dev = &rte_eth_devices[port_id];
        return dev->data->promiscuous;
}

void
rte_eth_allmulticast_enable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        if (!rte_eth_dev_is_valid_port(port_id)) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return;
        }

        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
        (*dev->dev_ops->allmulticast_enable)(dev);
        dev->data->all_multicast = 1;
}

void
rte_eth_allmulticast_disable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        if (!rte_eth_dev_is_valid_port(port_id)) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return;
        }

        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
        dev->data->all_multicast = 0;
        (*dev->dev_ops->allmulticast_disable)(dev);
}

int
rte_eth_allmulticast_get(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        if (!rte_eth_dev_is_valid_port(port_id)) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return -1;
        }

        dev = &rte_eth_devices[port_id];
        return dev->data->all_multicast;
}

/* Copy the link struct atomically, as a single 64-bit word */
static inline int
rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
                                struct rte_eth_link *link)
{
        struct rte_eth_link *dst = link;
        struct rte_eth_link *src = &(dev->data->dev_link);

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                        *(uint64_t *)src) == 0)
                return -1;

        return 0;
}

void
rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
{
        struct rte_eth_dev *dev;

        if (!rte_eth_dev_is_valid_port(port_id)) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return;
        }

        dev = &rte_eth_devices[port_id];

        if (dev->data->dev_conf.intr_conf.lsc != 0)
                rte_eth_dev_atomic_read_link_status(dev, eth_link);
        else {
                FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
                (*dev->dev_ops->link_update)(dev, 1);
                *eth_link = dev->data->dev_link;
        }
}
1419
1420 void
1421 rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
1422 {
1423         struct rte_eth_dev *dev;
1424
1425         if (!rte_eth_dev_is_valid_port(port_id)) {
1426                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1427                 return;
1428         }
1429
1430         dev = &rte_eth_devices[port_id];
1431
1432         if (dev->data->dev_conf.intr_conf.lsc != 0)
1433                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1434         else {
1435                 FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1436                 (*dev->dev_ops->link_update)(dev, 0);
1437                 *eth_link = dev->data->dev_link;
1438         }
1439 }
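/*
 * Illustrative usage (not part of this file): a non-blocking link poll
 * from an application main loop might look like:
 *
 *     struct rte_eth_link link;
 *
 *     rte_eth_link_get_nowait(port_id, &link);
 *     if (link.link_status)
 *             printf("port %u up at %u Mbps\n", port_id,
 *                    (unsigned)link.link_speed);
 */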
1440
1441 int
1442 rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
1443 {
1444         struct rte_eth_dev *dev;
1445
1446         if (!rte_eth_dev_is_valid_port(port_id)) {
1447                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1448                 return (-ENODEV);
1449         }
1450
1451         dev = &rte_eth_devices[port_id];
1452         memset(stats, 0, sizeof(*stats));
1453
1454         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1455         (*dev->dev_ops->stats_get)(dev, stats);
1456         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1457         return 0;
1458 }
1459
1460 void
1461 rte_eth_stats_reset(uint8_t port_id)
1462 {
1463         struct rte_eth_dev *dev;
1464
1465         if (!rte_eth_dev_is_valid_port(port_id)) {
1466                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1467                 return;
1468         }
1469
1470         dev = &rte_eth_devices[port_id];
1471
1472         FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
1473         (*dev->dev_ops->stats_reset)(dev);
1474 }
1475
1476 /* retrieve ethdev extended statistics */
1477 int
1478 rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstats *xstats,
1479         unsigned n)
1480 {
1481         struct rte_eth_stats eth_stats;
1482         struct rte_eth_dev *dev;
1483         unsigned count, i, q;
1484         uint64_t val;
1485         char *stats_ptr;
1486
1487         if (!rte_eth_dev_is_valid_port(port_id)) {
1488                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1489                 return -1;
1490         }
1491
1492         dev = &rte_eth_devices[port_id];
1493
1494         /* implemented by the driver */
1495         if (dev->dev_ops->xstats_get != NULL)
1496                 return (*dev->dev_ops->xstats_get)(dev, xstats, n);
1497
1498         /* else, return generic statistics */
1499         count = RTE_NB_STATS;
1500         count += dev->data->nb_rx_queues * RTE_NB_RXQ_STATS;
1501         count += dev->data->nb_tx_queues * RTE_NB_TXQ_STATS;
1502         if (n < count)
1503                 return count;
1504
1505         /* now fill the xstats structure */
1506
1507         count = 0;
1508         memset(&eth_stats, 0, sizeof(eth_stats));
1509         rte_eth_stats_get(port_id, &eth_stats);
1510
1511         /* global stats */
1512         for (i = 0; i < RTE_NB_STATS; i++) {
1513                 stats_ptr = (char *)&eth_stats + rte_stats_strings[i].offset;
1514                 val = *(uint64_t *)stats_ptr;
1515                 snprintf(xstats[count].name, sizeof(xstats[count].name),
1516                         "%s", rte_stats_strings[i].name);
1517                 xstats[count++].value = val;
1518         }
1519
1520         /* per-rxq stats */
1521         for (q = 0; q < dev->data->nb_rx_queues; q++) {
1522                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
1523                         stats_ptr = (char *)&eth_stats;
1524                         stats_ptr += rte_rxq_stats_strings[i].offset;
1525                         stats_ptr += q * sizeof(uint64_t);
1526                         val = *(uint64_t *)stats_ptr;
1527                         snprintf(xstats[count].name, sizeof(xstats[count].name),
1528                                 "rx_queue_%u_%s", q,
1529                                 rte_rxq_stats_strings[i].name);
1530                         xstats[count++].value = val;
1531                 }
1532         }
1533
1534         /* per-txq stats */
1535         for (q = 0; q < dev->data->nb_tx_queues; q++) {
1536                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
1537                         stats_ptr = (char *)&eth_stats;
1538                         stats_ptr += rte_txq_stats_strings[i].offset;
1539                         stats_ptr += q * sizeof(uint64_t);
1540                         val = *(uint64_t *)stats_ptr;
1541                         snprintf(xstats[count].name, sizeof(xstats[count].name),
1542                                 "tx_queue_%u_%s", q,
1543                                 rte_txq_stats_strings[i].name);
1544                         xstats[count++].value = val;
1545                 }
1546         }
1547
1548         return count;
1549 }
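/*
 * Illustrative two-call usage (not part of this file), assuming the
 * PMD honours the same size contract as the generic path above (the
 * array is only written once n is large enough):
 *
 *     int n = rte_eth_xstats_get(port_id, NULL, 0);
 *     struct rte_eth_xstats *xs = malloc(n * sizeof(*xs));
 *     int i;
 *
 *     n = rte_eth_xstats_get(port_id, xs, n);
 *     for (i = 0; i < n; i++)
 *             printf("%s: %"PRIu64"\n", xs[i].name, xs[i].value);
 *     free(xs);
 */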
1550
1551 /* reset ethdev extended statistics */
1552 void
1553 rte_eth_xstats_reset(uint8_t port_id)
1554 {
1555         struct rte_eth_dev *dev;
1556
1557         if (!rte_eth_dev_is_valid_port(port_id)) {
1558                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1559                 return;
1560         }
1561
1562         dev = &rte_eth_devices[port_id];
1563
1564         /* implemented by the driver */
1565         if (dev->dev_ops->xstats_reset != NULL) {
1566                 (*dev->dev_ops->xstats_reset)(dev);
1567                 return;
1568         }
1569
1570         /* fallback to default */
1571         rte_eth_stats_reset(port_id);
1572 }
1573
1574 static int
1575 set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
1576                 uint8_t is_rx)
1577 {
1578         struct rte_eth_dev *dev;
1579
1580         if (!rte_eth_dev_is_valid_port(port_id)) {
1581                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1582                 return -ENODEV;
1583         }
1584
1585         dev = &rte_eth_devices[port_id];
1586
1587         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
1588         return (*dev->dev_ops->queue_stats_mapping_set)
1589                         (dev, queue_id, stat_idx, is_rx);
1590 }
1591
1592
1593 int
1594 rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
1595                 uint8_t stat_idx)
1596 {
1597         return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
1598                         STAT_QMAP_TX);
1599 }
1600
1601
1602 int
1603 rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
1604                 uint8_t stat_idx)
1605 {
1606         return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
1607                         STAT_QMAP_RX);
1608 }
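/*
 * Illustrative usage (not part of this file): on NICs with fewer stat
 * registers than queues, map TX queue 5 and RX queue 5 onto stat
 * register 0 so their counters appear in rte_eth_stats.q_opackets[0]
 * and q_ipackets[0]:
 *
 *     rte_eth_dev_set_tx_queue_stats_mapping(port_id, 5, 0);
 *     rte_eth_dev_set_rx_queue_stats_mapping(port_id, 5, 0);
 */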
1609
1610
1611 void
1612 rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
1613 {
1614         struct rte_eth_dev *dev;
1615
1616         if (!rte_eth_dev_is_valid_port(port_id)) {
1617                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1618                 return;
1619         }
1620
1621         dev = &rte_eth_devices[port_id];
1622
1623         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
1624
1625         FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
1626         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
1627         dev_info->pci_dev = dev->pci_dev;
1628         if (dev->driver)
1629                 dev_info->driver_name = dev->driver->pci_drv.name;
1630 }
1631
1632 void
1633 rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
1634 {
1635         struct rte_eth_dev *dev;
1636
1637         if (!rte_eth_dev_is_valid_port(port_id)) {
1638                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1639                 return;
1640         }
1641
1642         dev = &rte_eth_devices[port_id];
1643         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
1644 }
1645
1646
1647 int
1648 rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu)
1649 {
1650         struct rte_eth_dev *dev;
1651
1652         if (!rte_eth_dev_is_valid_port(port_id)) {
1653                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1654                 return (-ENODEV);
1655         }
1656
1657         dev = &rte_eth_devices[port_id];
1658         *mtu = dev->data->mtu;
1659         return 0;
1660 }
1661
1662 int
1663 rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu)
1664 {
1665         int ret;
1666         struct rte_eth_dev *dev;
1667
1668         if (!rte_eth_dev_is_valid_port(port_id)) {
1669                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1670                 return (-ENODEV);
1671         }
1672
1673         dev = &rte_eth_devices[port_id];
1674         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
1675
1676         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
1677         if (!ret)
1678                 dev->data->mtu = mtu;
1679
1680         return ret;
1681 }
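/*
 * Illustrative usage (not part of this file): request a jumbo MTU;
 * dev->data->mtu is only updated when the PMD accepts the new value:
 *
 *     if (rte_eth_dev_set_mtu(port_id, 9000) != 0)
 *             printf("port %u: cannot set jumbo MTU\n", port_id);
 */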
1682
1683 int
1684 rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
1685 {
1686         struct rte_eth_dev *dev;
1687
1688         if (!rte_eth_dev_is_valid_port(port_id)) {
1689                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1690                 return (-ENODEV);
1691         }
1692
1693         dev = &rte_eth_devices[port_id];
1694         if (!(dev->data->dev_conf.rxmode.hw_vlan_filter)) {
1695                 PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
1696                 return (-ENOSYS);
1697         }
1698
1699         if (vlan_id > 4095) {
1700                 PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
1701                                 port_id, (unsigned) vlan_id);
1702                 return (-EINVAL);
1703         }
1704         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
1705         (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
1706         return (0);
1707 }
1708
1709 int
1710 rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on)
1711 {
1712         struct rte_eth_dev *dev;
1713
1714         if (!rte_eth_dev_is_valid_port(port_id)) {
1715                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1716                 return (-ENODEV);
1717         }
1718
1719         dev = &rte_eth_devices[port_id];
1720         if (rx_queue_id >= dev->data->nb_rx_queues) {
1721                 PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", rx_queue_id);
1722                 return (-EINVAL);
1723         }
1724
1725         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
1726         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
1727
1728         return (0);
1729 }
1730
1731 int
1732 rte_eth_dev_set_vlan_ether_type(uint8_t port_id, uint16_t tpid)
1733 {
1734         struct rte_eth_dev *dev;
1735
1736         if (!rte_eth_dev_is_valid_port(port_id)) {
1737                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1738                 return (-ENODEV);
1739         }
1740
1741         dev = &rte_eth_devices[port_id];
1742         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
1743         (*dev->dev_ops->vlan_tpid_set)(dev, tpid);
1744
1745         return (0);
1746 }
1747
1748 int
1749 rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
1750 {
1751         struct rte_eth_dev *dev;
1752         int ret = 0;
1753         int mask = 0;
1754         int cur, org = 0;
1755
1756         if (!rte_eth_dev_is_valid_port(port_id)) {
1757                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1758                 return (-ENODEV);
1759         }
1760
1761         dev = &rte_eth_devices[port_id];
1762
1763         /* check which options were changed by the application */
1764         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
1765         org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
1766         if (cur != org) {
1767                 dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
1768                 mask |= ETH_VLAN_STRIP_MASK;
1769         }
1770
1771         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
1772         org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
1773         if (cur != org) {
1774                 dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
1775                 mask |= ETH_VLAN_FILTER_MASK;
1776         }
1777
1778         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
1779         org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
1780         if (cur != org) {
1781                 dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
1782                 mask |= ETH_VLAN_EXTEND_MASK;
1783         }
1784
1785         /* no change */
1786         if (mask == 0)
1787                 return ret;
1788
1789         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
1790         (*dev->dev_ops->vlan_offload_set)(dev, mask);
1791
1792         return ret;
1793 }
1794
1795 int
1796 rte_eth_dev_get_vlan_offload(uint8_t port_id)
1797 {
1798         struct rte_eth_dev *dev;
1799         int ret = 0;
1800
1801         if (!rte_eth_dev_is_valid_port(port_id)) {
1802                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1803                 return (-ENODEV);
1804         }
1805
1806         dev = &rte_eth_devices[port_id];
1807
1808         if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1809                 ret |= ETH_VLAN_STRIP_OFFLOAD;
1810
1811         if (dev->data->dev_conf.rxmode.hw_vlan_filter)
1812                 ret |= ETH_VLAN_FILTER_OFFLOAD;
1813
1814         if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1815                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
1816
1817         return ret;
1818 }
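/*
 * Illustrative read-modify-write (not part of this file): enable VLAN
 * stripping without disturbing the filter and extend settings:
 *
 *     int mask = rte_eth_dev_get_vlan_offload(port_id);
 *
 *     mask |= ETH_VLAN_STRIP_OFFLOAD;
 *     rte_eth_dev_set_vlan_offload(port_id, mask);
 */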
1819
1820 int
1821 rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on)
1822 {
1823         struct rte_eth_dev *dev;
1824
1825         if (!rte_eth_dev_is_valid_port(port_id)) {
1826                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1827                 return (-ENODEV);
1828         }
1829
1830         dev = &rte_eth_devices[port_id];
1831         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
1832         (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on);
1833
1834         return 0;
1835 }
1836
1837 int
1838 rte_eth_dev_fdir_add_signature_filter(uint8_t port_id,
1839                                       struct rte_fdir_filter *fdir_filter,
1840                                       uint8_t queue)
1841 {
1842         struct rte_eth_dev *dev;
1843
1844         if (!rte_eth_dev_is_valid_port(port_id)) {
1845                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1846                 return (-ENODEV);
1847         }
1848
1849         dev = &rte_eth_devices[port_id];
1850
1851         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
1852                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1853                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1854                 return (-ENOSYS);
1855         }
1856
1857         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1858              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1859             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1860                 PMD_DEBUG_TRACE("Ports are meaningless for SCTP and "
1861                                 "NONE l4type; source and destination "
1862                                 "ports must be zero\n");
1863                 return (-EINVAL);
1864         }
1865
1866         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_signature_filter, -ENOTSUP);
1867         return (*dev->dev_ops->fdir_add_signature_filter)(dev, fdir_filter,
1868                                                                 queue);
1869 }
1870
1871 int
1872 rte_eth_dev_fdir_update_signature_filter(uint8_t port_id,
1873                                          struct rte_fdir_filter *fdir_filter,
1874                                          uint8_t queue)
1875 {
1876         struct rte_eth_dev *dev;
1877
1878         if (!rte_eth_dev_is_valid_port(port_id)) {
1879                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1880                 return (-ENODEV);
1881         }
1882
1883         dev = &rte_eth_devices[port_id];
1884
1885         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
1886                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1887                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1888                 return (-ENOSYS);
1889         }
1890
1891         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1892              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1893             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1894                 PMD_DEBUG_TRACE("Ports are meaningless for SCTP and "
1895                                 "NONE l4type; source and destination "
1896                                 "ports must be zero\n");
1897                 return (-EINVAL);
1898         }
1899
1900         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_signature_filter, -ENOTSUP);
1901         return (*dev->dev_ops->fdir_update_signature_filter)(dev, fdir_filter,
1902                                                                 queue);
1903
1904 }
1905
1906 int
1907 rte_eth_dev_fdir_remove_signature_filter(uint8_t port_id,
1908                                          struct rte_fdir_filter *fdir_filter)
1909 {
1910         struct rte_eth_dev *dev;
1911
1912         if (!rte_eth_dev_is_valid_port(port_id)) {
1913                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1914                 return (-ENODEV);
1915         }
1916
1917         dev = &rte_eth_devices[port_id];
1918
1919         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
1920                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1921                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1922                 return (-ENOSYS);
1923         }
1924
1925         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1926              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1927             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1928                 PMD_DEBUG_TRACE("Ports are meaningless for SCTP and "
1929                                 "NONE l4type; source and destination "
1930                                 "ports must be zero\n");
1931                 return (-EINVAL);
1932         }
1933
1934         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_signature_filter, -ENOTSUP);
1935         return (*dev->dev_ops->fdir_remove_signature_filter)(dev, fdir_filter);
1936 }
1937
1938 int
1939 rte_eth_dev_fdir_get_infos(uint8_t port_id, struct rte_eth_fdir *fdir)
1940 {
1941         struct rte_eth_dev *dev;
1942
1943         if (!rte_eth_dev_is_valid_port(port_id)) {
1944                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1945                 return (-ENODEV);
1946         }
1947
1948         dev = &rte_eth_devices[port_id];
1949         if (!(dev->data->dev_conf.fdir_conf.mode)) {
1950                 PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
1951                 return (-ENOSYS);
1952         }
1953
1954         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_infos_get, -ENOTSUP);
1955
1956         (*dev->dev_ops->fdir_infos_get)(dev, fdir);
1957         return (0);
1958 }
1959
1960 int
1961 rte_eth_dev_fdir_add_perfect_filter(uint8_t port_id,
1962                                     struct rte_fdir_filter *fdir_filter,
1963                                     uint16_t soft_id, uint8_t queue,
1964                                     uint8_t drop)
1965 {
1966         struct rte_eth_dev *dev;
1967
1968         if (!rte_eth_dev_is_valid_port(port_id)) {
1969                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1970                 return (-ENODEV);
1971         }
1972
1973         dev = &rte_eth_devices[port_id];
1974
1975         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
1976                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1977                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1978                 return (-ENOSYS);
1979         }
1980
1981         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1982              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1983             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1984                 PMD_DEBUG_TRACE("Ports are meaningless for SCTP and "
1985                                 "NONE l4type; source and destination "
1986                                 "ports must be zero\n");
1987                 return (-EINVAL);
1988         }
1989
1990         /* For now IPv6 is not supported with perfect filter */
1991         if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
1992                 return (-ENOTSUP);
1993
1994         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_perfect_filter, -ENOTSUP);
1995         return (*dev->dev_ops->fdir_add_perfect_filter)(dev, fdir_filter,
1996                                                                 soft_id, queue,
1997                                                                 drop);
1998 }
1999
2000 int
2001 rte_eth_dev_fdir_update_perfect_filter(uint8_t port_id,
2002                                        struct rte_fdir_filter *fdir_filter,
2003                                        uint16_t soft_id, uint8_t queue,
2004                                        uint8_t drop)
2005 {
2006         struct rte_eth_dev *dev;
2007
2008         if (!rte_eth_dev_is_valid_port(port_id)) {
2009                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2010                 return (-ENODEV);
2011         }
2012
2013         dev = &rte_eth_devices[port_id];
2014
2015         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
2016                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
2017                                 port_id, dev->data->dev_conf.fdir_conf.mode);
2018                 return (-ENOSYS);
2019         }
2020
2021         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
2022              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
2023             && (fdir_filter->port_src || fdir_filter->port_dst)) {
2024                 PMD_DEBUG_TRACE("Ports are meaningless for SCTP and "
2025                                 "NONE l4type; source and destination "
2026                                 "ports must be zero\n");
2027                 return (-EINVAL);
2028         }
2029
2030         /* For now IPv6 is not supported with perfect filter */
2031         if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
2032                 return (-ENOTSUP);
2033
2034         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_perfect_filter, -ENOTSUP);
2035         return (*dev->dev_ops->fdir_update_perfect_filter)(dev, fdir_filter,
2036                                                         soft_id, queue, drop);
2037 }
2038
2039 int
2040 rte_eth_dev_fdir_remove_perfect_filter(uint8_t port_id,
2041                                        struct rte_fdir_filter *fdir_filter,
2042                                        uint16_t soft_id)
2043 {
2044         struct rte_eth_dev *dev;
2045
2046         if (!rte_eth_dev_is_valid_port(port_id)) {
2047                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2048                 return (-ENODEV);
2049         }
2050
2051         dev = &rte_eth_devices[port_id];
2052
2053         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
2054                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
2055                                 port_id, dev->data->dev_conf.fdir_conf.mode);
2056                 return (-ENOSYS);
2057         }
2058
2059         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
2060              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
2061             && (fdir_filter->port_src || fdir_filter->port_dst)) {
2062                 PMD_DEBUG_TRACE("Ports are meaningless for SCTP and "
2063                                 "NONE l4type; source and destination "
2064                                 "ports must be zero\n");
2065                 return (-EINVAL);
2066         }
2067
2068         /* For now IPv6 is not supported with perfect filter */
2069         if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
2070                 return (-ENOTSUP);
2071
2072         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_perfect_filter, -ENOTSUP);
2073         return (*dev->dev_ops->fdir_remove_perfect_filter)(dev, fdir_filter,
2074                                                                 soft_id);
2075 }
2076
2077 int
2078 rte_eth_dev_fdir_set_masks(uint8_t port_id, struct rte_fdir_masks *fdir_mask)
2079 {
2080         struct rte_eth_dev *dev;
2081
2082         if (!rte_eth_dev_is_valid_port(port_id)) {
2083                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2084                 return (-ENODEV);
2085         }
2086
2087         dev = &rte_eth_devices[port_id];
2088         if (!(dev->data->dev_conf.fdir_conf.mode)) {
2089                 PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
2090                 return (-ENOSYS);
2091         }
2092
2093         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_set_masks, -ENOTSUP);
2094         return (*dev->dev_ops->fdir_set_masks)(dev, fdir_mask);
2095 }
2096
2097 int
2098 rte_eth_dev_flow_ctrl_get(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
2099 {
2100         struct rte_eth_dev *dev;
2101
2102         if (!rte_eth_dev_is_valid_port(port_id)) {
2103                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2104                 return (-ENODEV);
2105         }
2106
2107         dev = &rte_eth_devices[port_id];
2108         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
2109         memset(fc_conf, 0, sizeof(*fc_conf));
2110         return (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf);
2111 }
2112
2113 int
2114 rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
2115 {
2116         struct rte_eth_dev *dev;
2117
2118         if (!rte_eth_dev_is_valid_port(port_id)) {
2119                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2120                 return (-ENODEV);
2121         }
2122
2123         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
2124                 PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
2125                 return (-EINVAL);
2126         }
2127
2128         dev = &rte_eth_devices[port_id];
2129         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
2130         return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
2131 }
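/*
 * Illustrative usage (not part of this file): read back the current
 * flow control settings, change only the mode, and write them back:
 *
 *     struct rte_eth_fc_conf fc;
 *
 *     if (rte_eth_dev_flow_ctrl_get(port_id, &fc) == 0) {
 *             fc.mode = RTE_FC_FULL;
 *             rte_eth_dev_flow_ctrl_set(port_id, &fc);
 *     }
 */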
2132
2133 int
2134 rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc_conf)
2135 {
2136         struct rte_eth_dev *dev;
2137
2138         if (!rte_eth_dev_is_valid_port(port_id)) {
2139                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2140                 return (-ENODEV);
2141         }
2142
2143         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
2144                 PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
2145                 return (-EINVAL);
2146         }
2147
2148         dev = &rte_eth_devices[port_id];
2149         /* High water, low water validation are device specific */
2150         if (*dev->dev_ops->priority_flow_ctrl_set)
2151                 return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
2152         return (-ENOTSUP);
2153 }
2154
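/*
 * Descriptive note: RSS redirection table (RETA) entries travel in
 * groups of RTE_RETA_GROUP_SIZE (64). reta_conf[i].mask selects which
 * of the 64 slots in reta_conf[i].reta[] are meant to be read or
 * written. The two helpers below check that at least one mask bit is
 * set and that every selected entry points at an existing RX queue.
 */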
2155 static inline int
2156 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
2157                         uint16_t reta_size)
2158 {
2159         uint16_t i, num;
2160
2161         if (!reta_conf)
2162                 return -EINVAL;
2163
2164         if (reta_size != RTE_ALIGN(reta_size, RTE_RETA_GROUP_SIZE)) {
2165                 PMD_DEBUG_TRACE("Invalid reta size, should be %u aligned\n",
2166                                                         RTE_RETA_GROUP_SIZE);
2167                 return -EINVAL;
2168         }
2169
2170         num = reta_size / RTE_RETA_GROUP_SIZE;
2171         for (i = 0; i < num; i++) {
2172                 if (reta_conf[i].mask)
2173                         return 0;
2174         }
2175
2176         return -EINVAL;
2177 }
2178
2179 static inline int
2180 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
2181                          uint16_t reta_size,
2182                          uint8_t max_rxq)
2183 {
2184         uint16_t i, idx, shift;
2185
2186         if (!reta_conf)
2187                 return -EINVAL;
2188
2189         if (max_rxq == 0) {
2190                 PMD_DEBUG_TRACE("No receive queue is available\n");
2191                 return -EINVAL;
2192         }
2193
2194         for (i = 0; i < reta_size; i++) {
2195                 idx = i / RTE_RETA_GROUP_SIZE;
2196                 shift = i % RTE_RETA_GROUP_SIZE;
2197                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
2198                         (reta_conf[idx].reta[shift] >= max_rxq)) {
2199                         PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
2200                                 "the maximum rxq index: %u\n", idx, shift,
2201                                 reta_conf[idx].reta[shift], max_rxq);
2202                         return -EINVAL;
2203                 }
2204         }
2205
2206         return 0;
2207 }
2208
2209 int
2210 rte_eth_dev_rss_reta_update(uint8_t port_id,
2211                             struct rte_eth_rss_reta_entry64 *reta_conf,
2212                             uint16_t reta_size)
2213 {
2214         struct rte_eth_dev *dev;
2215         int ret;
2216
2217         if (!rte_eth_dev_is_valid_port(port_id)) {
2218                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2219                 return -ENODEV;
2220         }
2221
2222         /* Check mask bits */
2223         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2224         if (ret < 0)
2225                 return ret;
2226
2227         dev = &rte_eth_devices[port_id];
2228
2229         /* Check entry value */
2230         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
2231                                 dev->data->nb_rx_queues);
2232         if (ret < 0)
2233                 return ret;
2234
2235         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
2236         return (*dev->dev_ops->reta_update)(dev, reta_conf, reta_size);
2237 }
2238
2239 int
2240 rte_eth_dev_rss_reta_query(uint8_t port_id,
2241                            struct rte_eth_rss_reta_entry64 *reta_conf,
2242                            uint16_t reta_size)
2243 {
2244         struct rte_eth_dev *dev;
2245         int ret;
2246
2247         if (!rte_eth_dev_is_valid_port(port_id)) {
2248                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2249                 return -ENODEV;
2250         }
2251
2252         /* Check mask bits */
2253         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2254         if (ret < 0)
2255                 return ret;
2256
2257         dev = &rte_eth_devices[port_id];
2258         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
2259         return (*dev->dev_ops->reta_query)(dev, reta_conf, reta_size);
2260 }
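/*
 * Illustrative usage (not part of this file): assuming a device with a
 * 128-entry table, spread the RETA evenly across two RX queues:
 *
 *     struct rte_eth_rss_reta_entry64 reta_conf[2];
 *     uint16_t i;
 *
 *     memset(reta_conf, 0, sizeof(reta_conf));
 *     for (i = 0; i < 128; i++) {
 *             reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
 *                     1ULL << (i % RTE_RETA_GROUP_SIZE);
 *             reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *                     i % 2;
 *     }
 *     rte_eth_dev_rss_reta_update(port_id, reta_conf, 128);
 */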
2261
2262 int
2263 rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf)
2264 {
2265         struct rte_eth_dev *dev;
2266         uint16_t rss_hash_protos;
2267
2268         if (!rte_eth_dev_is_valid_port(port_id)) {
2269                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2270                 return (-ENODEV);
2271         }
2272
2273         rss_hash_protos = rss_conf->rss_hf;
2274         if ((rss_hash_protos != 0) &&
2275             ((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) {
2276                 PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%x\n",
2277                                 rss_hash_protos);
2278                 return (-EINVAL);
2279         }
2280         dev = &rte_eth_devices[port_id];
2281         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
2282         return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
2283 }
2284
2285 int
2286 rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
2287                               struct rte_eth_rss_conf *rss_conf)
2288 {
2289         struct rte_eth_dev *dev;
2290
2291         if (!rte_eth_dev_is_valid_port(port_id)) {
2292                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2293                 return (-ENODEV);
2294         }
2295
2296         dev = &rte_eth_devices[port_id];
2297         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
2298         return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
2299 }
2300
2301 int
2302 rte_eth_dev_udp_tunnel_add(uint8_t port_id,
2303                            struct rte_eth_udp_tunnel *udp_tunnel)
2304 {
2305         struct rte_eth_dev *dev;
2306
2307         if (!rte_eth_dev_is_valid_port(port_id)) {
2308                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2309                 return -ENODEV;
2310         }
2311
2312         if (udp_tunnel == NULL) {
2313                 PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2314                 return -EINVAL;
2315         }
2316
2317         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2318                 PMD_DEBUG_TRACE("Invalid tunnel type\n");
2319                 return -EINVAL;
2320         }
2321
2322         dev = &rte_eth_devices[port_id];
2323         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_add, -ENOTSUP);
2324         return (*dev->dev_ops->udp_tunnel_add)(dev, udp_tunnel);
2325 }
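/*
 * Illustrative usage (not part of this file): register the IANA VXLAN
 * port so the NIC can recognize VXLAN-encapsulated traffic:
 *
 *     struct rte_eth_udp_tunnel tunnel = {
 *             .udp_port = 4789,
 *             .prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *     };
 *
 *     rte_eth_dev_udp_tunnel_add(port_id, &tunnel);
 */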
2326
2327 int
2328 rte_eth_dev_udp_tunnel_delete(uint8_t port_id,
2329                               struct rte_eth_udp_tunnel *udp_tunnel)
2330 {
2331         struct rte_eth_dev *dev;
2332
2333         if (!rte_eth_dev_is_valid_port(port_id)) {
2334                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2335                 return -ENODEV;
2336         }
2337
2338         dev = &rte_eth_devices[port_id];
2339
2340         if (udp_tunnel == NULL) {
2341                 PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2342                 return -EINVAL;
2343         }
2344
2345         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2346                 PMD_DEBUG_TRACE("Invalid tunnel type\n");
2347                 return -EINVAL;
2348         }
2349
2350         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_del, -ENOTSUP);
2351         return (*dev->dev_ops->udp_tunnel_del)(dev, udp_tunnel);
2352 }
2353
2354 int
2355 rte_eth_led_on(uint8_t port_id)
2356 {
2357         struct rte_eth_dev *dev;
2358
2359         if (!rte_eth_dev_is_valid_port(port_id)) {
2360                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2361                 return (-ENODEV);
2362         }
2363
2364         dev = &rte_eth_devices[port_id];
2365         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
2366         return ((*dev->dev_ops->dev_led_on)(dev));
2367 }
2368
2369 int
2370 rte_eth_led_off(uint8_t port_id)
2371 {
2372         struct rte_eth_dev *dev;
2373
2374         if (!rte_eth_dev_is_valid_port(port_id)) {
2375                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2376                 return (-ENODEV);
2377         }
2378
2379         dev = &rte_eth_devices[port_id];
2380         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
2381         return ((*dev->dev_ops->dev_led_off)(dev));
2382 }
2383
2384 /*
2385  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2386  * an empty spot.
2387  */
2388 static inline int
2389 get_mac_addr_index(uint8_t port_id, struct ether_addr *addr)
2390 {
2391         struct rte_eth_dev_info dev_info;
2392         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2393         unsigned i;
2394
2395         rte_eth_dev_info_get(port_id, &dev_info);
2396
2397         for (i = 0; i < dev_info.max_mac_addrs; i++)
2398                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
2399                         return i;
2400
2401         return -1;
2402 }
2403
2404 static struct ether_addr null_mac_addr = {{0, 0, 0, 0, 0, 0}};
2405
2406 int
2407 rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
2408                         uint32_t pool)
2409 {
2410         struct rte_eth_dev *dev;
2411         int index;
2412         uint64_t pool_mask;
2413
2414         if (!rte_eth_dev_is_valid_port(port_id)) {
2415                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2416                 return (-ENODEV);
2417         }
2418
2419         dev = &rte_eth_devices[port_id];
2420         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
2421
2422         if (is_zero_ether_addr(addr)) {
2423                 PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2424                         port_id);
2425                 return (-EINVAL);
2426         }
2427         if (pool >= ETH_64_POOLS) {
2428                 PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
2429                 return (-EINVAL);
2430         }
2431
2432         index = get_mac_addr_index(port_id, addr);
2433         if (index < 0) {
2434                 index = get_mac_addr_index(port_id, &null_mac_addr);
2435                 if (index < 0) {
2436                         PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2437                                 port_id);
2438                         return (-ENOSPC);
2439                 }
2440         } else {
2441                 pool_mask = dev->data->mac_pool_sel[index];
2442
2443                 /* If both the MAC address and the pool are already set, do nothing */
2444                 if (pool_mask & (1ULL << pool))
2445                         return 0;
2446         }
2447
2448         /* Update NIC */
2449         (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
2450
2451         /* Update address in NIC data structure */
2452         ether_addr_copy(addr, &dev->data->mac_addrs[index]);
2453
2454         /* Update pool bitmap in NIC data structure */
2455         dev->data->mac_pool_sel[index] |= (1ULL << pool);
2456
2457         return 0;
2458 }
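/*
 * Illustrative usage (not part of this file): add a locally
 * administered secondary address to pool 0:
 *
 *     struct ether_addr mac = {
 *             .addr_bytes = {0x02, 0x00, 0x00, 0x00, 0x00, 0x01}
 *     };
 *
 *     rte_eth_dev_mac_addr_add(port_id, &mac, 0);
 */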
2459
2460 int
2461 rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
2462 {
2463         struct rte_eth_dev *dev;
2464         int index;
2465
2466         if (!rte_eth_dev_is_valid_port(port_id)) {
2467                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2468                 return (-ENODEV);
2469         }
2470
2471         dev = &rte_eth_devices[port_id];
2472         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
2473
2474         index = get_mac_addr_index(port_id, addr);
2475         if (index == 0) {
2476                 PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
2477                 return (-EADDRINUSE);
2478         } else if (index < 0)
2479                 return 0;  /* Do nothing if address wasn't found */
2480
2481         /* Update NIC */
2482         (*dev->dev_ops->mac_addr_remove)(dev, index);
2483
2484         /* Update address in NIC data structure */
2485         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
2486
2487         /* reset pool bitmap */
2488         dev->data->mac_pool_sel[index] = 0;
2489
2490         return 0;
2491 }
2492
2493 int
2494 rte_eth_dev_set_vf_rxmode(uint8_t port_id,  uint16_t vf,
2495                                 uint16_t rx_mode, uint8_t on)
2496 {
2497         uint16_t num_vfs;
2498         struct rte_eth_dev *dev;
2499         struct rte_eth_dev_info dev_info;
2500
2501         if (!rte_eth_dev_is_valid_port(port_id)) {
2502                 PMD_DEBUG_TRACE("set VF RX mode:Invalid port_id=%d\n",
2503                                 port_id);
2504                 return (-ENODEV);
2505         }
2506
2507         dev = &rte_eth_devices[port_id];
2508         rte_eth_dev_info_get(port_id, &dev_info);
2509
2510         num_vfs = dev_info.max_vfs;
2511         if (vf >= num_vfs) {
2512                 PMD_DEBUG_TRACE("set VF RX mode:invalid VF id %d\n", vf);
2513                 return (-EINVAL);
2514         }
2515
2516         if (rx_mode == 0) {
2517                 PMD_DEBUG_TRACE("set VF RX mode:mode mask cannot be zero\n");
2518                 return (-EINVAL);
2519         }
2520
2521         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP);
2522         return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on);
2523 }
2524
2525 /*
2526  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2527  * an empty spot.
2528  */
2529 static inline int
2530 get_hash_mac_addr_index(uint8_t port_id, struct ether_addr *addr)
2531 {
2532         struct rte_eth_dev_info dev_info;
2533         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2534         unsigned i;
2535
2536         rte_eth_dev_info_get(port_id, &dev_info);
2537         if (!dev->data->hash_mac_addrs)
2538                 return -1;
2539
2540         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
2541                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
2542                         ETHER_ADDR_LEN) == 0)
2543                         return i;
2544
2545         return -1;
2546 }
2547
2548 int
2549 rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
2550                                 uint8_t on)
2551 {
2552         int index;
2553         int ret;
2554         struct rte_eth_dev *dev;
2555
2556         if (!rte_eth_dev_is_valid_port(port_id)) {
2557                 PMD_DEBUG_TRACE("unicast hash setting:Invalid port_id=%d\n",
2558                         port_id);
2559                 return (-ENODEV);
2560         }
2561
2562         dev = &rte_eth_devices[port_id];
2563         if (is_zero_ether_addr(addr)) {
2564                 PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2565                         port_id);
2566                 return (-EINVAL);
2567         }
2568
2569         index = get_hash_mac_addr_index(port_id, addr);
2570         /* Check if it's already there, and do nothing */
2571         if ((index >= 0) && (on))
2572                 return 0;
2573
2574         if (index < 0) {
2575                 if (!on) {
2576                         PMD_DEBUG_TRACE("port %d: the MAC address was not "
2577                                 "set in UTA\n", port_id);
2578                         return (-EINVAL);
2579                 }
2580
2581                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
2582                 if (index < 0) {
2583                         PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2584                                         port_id);
2585                         return (-ENOSPC);
2586                 }
2587         }
2588
2589         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
2590         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
2591         if (ret == 0) {
2592                 /* Update address in NIC data structure */
2593                 if (on)
2594                         ether_addr_copy(addr,
2595                                         &dev->data->hash_mac_addrs[index]);
2596                 else
2597                         ether_addr_copy(&null_mac_addr,
2598                                         &dev->data->hash_mac_addrs[index]);
2599         }
2600
2601         return ret;
2602 }
2603
2604 int
2605 rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
2606 {
2607         struct rte_eth_dev *dev;
2608
2609         if (!rte_eth_dev_is_valid_port(port_id)) {
2610                 PMD_DEBUG_TRACE("unicast hash setting:Invalid port_id=%d\n",
2611                         port_id);
2612                 return (-ENODEV);
2613         }
2614
2615         dev = &rte_eth_devices[port_id];
2616
2617         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
2618         return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
2619 }
2620
2621 int
2622 rte_eth_dev_set_vf_rx(uint8_t port_id, uint16_t vf, uint8_t on)
2623 {
2624         uint16_t num_vfs;
2625         struct rte_eth_dev *dev;
2626         struct rte_eth_dev_info dev_info;
2627
2628         if (!rte_eth_dev_is_valid_port(port_id)) {
2629                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2630                 return (-ENODEV);
2631         }
2632
2633         dev = &rte_eth_devices[port_id];
2634         rte_eth_dev_info_get(port_id, &dev_info);
2635
2636         num_vfs = dev_info.max_vfs;
2637         if (vf >= num_vfs) {
2638                 PMD_DEBUG_TRACE("port %d: invalid vf id %d\n",
2639                                 port_id, vf);
2640                 return (-EINVAL);
2641         }
2642
2643         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP);
2644         return (*dev->dev_ops->set_vf_rx)(dev, vf, on);
2645 }
2646
2647 int
2648 rte_eth_dev_set_vf_tx(uint8_t port_id, uint16_t vf, uint8_t on)
2649 {
2650         uint16_t num_vfs;
2651         struct rte_eth_dev *dev;
2652         struct rte_eth_dev_info dev_info;
2653
2654         if (!rte_eth_dev_is_valid_port(port_id)) {
2655                 PMD_DEBUG_TRACE("set pool tx:Invalid port_id=%d\n", port_id);
2656                 return (-ENODEV);
2657         }
2658
2659         dev = &rte_eth_devices[port_id];
2660         rte_eth_dev_info_get(port_id, &dev_info);
2661
2662         num_vfs = dev_info.max_vfs;
2663         if (vf >= num_vfs) {
2664                 PMD_DEBUG_TRACE("set pool tx:invalid pool id=%d\n",
2665                                 vf);
2666                 return (-EINVAL);
2667         }
2668
2669         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP);
2670         return (*dev->dev_ops->set_vf_tx)(dev, vf, on);
2671 }
2672
2673 int
2674 rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id,
2675                                  uint64_t vf_mask, uint8_t vlan_on)
2676 {
2677         struct rte_eth_dev *dev;
2678
2679         if (!rte_eth_dev_is_valid_port(port_id)) {
2680                 PMD_DEBUG_TRACE("VF VLAN filter:invalid port id=%d\n",
2681                                 port_id);
2682                 return (-ENODEV);
2683         }
2684         dev = &rte_eth_devices[port_id];
2685
2686         if (vlan_id > ETHER_MAX_VLAN_ID) {
2687                 PMD_DEBUG_TRACE("VF VLAN filter:invalid VLAN id=%d\n",
2688                         vlan_id);
2689                 return (-EINVAL);
2690         }
2691
2692         if (vf_mask == 0) {
2693                 PMD_DEBUG_TRACE("VF VLAN filter:vf_mask cannot be 0\n");
2694                 return (-EINVAL);
2695         }
2696
2698         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP);
2699         return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id,
2700                                                 vf_mask, vlan_on);
2701 }
2702
2703 int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
2704                                         uint16_t tx_rate)
2705 {
2706         struct rte_eth_dev *dev;
2707         struct rte_eth_dev_info dev_info;
2708         struct rte_eth_link link;
2709
2710         if (!rte_eth_dev_is_valid_port(port_id)) {
2711                 PMD_DEBUG_TRACE("set queue rate limit:invalid port id=%d\n",
2712                                 port_id);
2713                 return -ENODEV;
2714         }
2715
2716         dev = &rte_eth_devices[port_id];
2717         rte_eth_dev_info_get(port_id, &dev_info);
2718         link = dev->data->dev_link;
2719
2720         if (queue_idx >= dev_info.max_tx_queues) {
2721                 PMD_DEBUG_TRACE("set queue rate limit:port %d: "
2722                                 "invalid queue id=%d\n", port_id, queue_idx);
2723                 return -EINVAL;
2724         }
2725
2726         if (tx_rate > link.link_speed) {
2727                 PMD_DEBUG_TRACE("set queue rate limit:invalid tx_rate=%d, "
2728                                 "bigger than link speed= %d\n",
2729                         tx_rate, link.link_speed);
2730                 return -EINVAL;
2731         }
2732
2733         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
2734         return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate);
2735 }
2736
2737 int rte_eth_set_vf_rate_limit(uint8_t port_id, uint16_t vf, uint16_t tx_rate,
2738                                 uint64_t q_msk)
2739 {
2740         struct rte_eth_dev *dev;
2741         struct rte_eth_dev_info dev_info;
2742         struct rte_eth_link link;
2743
2744         if (q_msk == 0)
2745                 return 0;
2746
2747         if (!rte_eth_dev_is_valid_port(port_id)) {
2748                 PMD_DEBUG_TRACE("set VF rate limit:invalid port id=%d\n",
2749                                 port_id);
2750                 return -ENODEV;
2751         }
2752
2753         dev = &rte_eth_devices[port_id];
2754         rte_eth_dev_info_get(port_id, &dev_info);
2755         link = dev->data->dev_link;
2756
2757         if (vf >= dev_info.max_vfs) {
2758                 PMD_DEBUG_TRACE("set VF rate limit:port %d: "
2759                                 "invalid vf id=%d\n", port_id, vf);
2760                 return -EINVAL;
2761         }
2762
2763         if (tx_rate > link.link_speed) {
2764                 PMD_DEBUG_TRACE("set VF rate limit:invalid tx_rate=%d, "
2765                                 "bigger than link speed= %d\n",
2766                                 tx_rate, link.link_speed);
2767                 return -EINVAL;
2768         }
2769
2770         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rate_limit, -ENOTSUP);
2771         return (*dev->dev_ops->set_vf_rate_limit)(dev, vf, tx_rate, q_msk);
2772 }
2773
2774 int
2775 rte_eth_mirror_rule_set(uint8_t port_id,
2776                         struct rte_eth_vmdq_mirror_conf *mirror_conf,
2777                         uint8_t rule_id, uint8_t on)
2778 {
2779         struct rte_eth_dev *dev;
2780
2781         if (!rte_eth_dev_is_valid_port(port_id)) {
2782                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2783                 return (-ENODEV);
2784         }
2785
2786         if (mirror_conf->rule_type_mask == 0) {
2787                 PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
2788                 return (-EINVAL);
2789         }
2790
2791         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
2792                 PMD_DEBUG_TRACE("Invalid dst pool, pool id must "
2793                         "be 0-%d\n", ETH_64_POOLS - 1);
2794                 return (-EINVAL);
2795         }
2796
2797         if ((mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) &&
2798                 (mirror_conf->pool_mask == 0)) {
2799                 PMD_DEBUG_TRACE("Invalid mirror pool, pool mask cannot "
2800                                 "be 0.\n");
2801                 return (-EINVAL);
2802         }
2803
2804         if (rule_id >= ETH_VMDQ_NUM_MIRROR_RULE) {
2805                 PMD_DEBUG_TRACE("Invalid rule_id, rule_id must be 0-%d\n",
2806                         ETH_VMDQ_NUM_MIRROR_RULE - 1);
2807                 return (-EINVAL);
2808         }
2810
2811         dev = &rte_eth_devices[port_id];
2812         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
2813
2814         return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
2815 }
2816
2817 int
2818 rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
2819 {
2820         struct rte_eth_dev *dev;
2821
2822         if (!rte_eth_dev_is_valid_port(port_id)) {
2823                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2824                 return (-ENODEV);
2825         }
2826
2827         if (rule_id >= ETH_VMDQ_NUM_MIRROR_RULE) {
2828                 PMD_DEBUG_TRACE("Invalid rule_id, rule_id must be 0-%d\n",
2829                         ETH_VMDQ_NUM_MIRROR_RULE - 1);
2830                 return (-EINVAL);
2831         }
2833
2834         dev = &rte_eth_devices[port_id];
2835         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
2836
2837         return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
2838 }
2839
2840 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2841 uint16_t
2842 rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
2843                  struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
2844 {
2845         struct rte_eth_dev *dev;
2846
2847         if (!rte_eth_dev_is_valid_port(port_id)) {
2848                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2849                 return 0;
2850         }
2851
2852         dev = &rte_eth_devices[port_id];
2853         FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);
2854         if (queue_id >= dev->data->nb_rx_queues) {
2855                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
2856                 return 0;
2857         }
2858         return (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
2859                                                 rx_pkts, nb_pkts);
2860 }
2861
2862 uint16_t
2863 rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
2864                  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2865 {
2866         struct rte_eth_dev *dev;
2867
2868         if (!rte_eth_dev_is_valid_port(port_id)) {
2869                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2870                 return 0;
2871         }
2872
2873         dev = &rte_eth_devices[port_id];
2874
2875         FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);
2876         if (queue_id >= dev->data->nb_tx_queues) {
2877                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
2878                 return 0;
2879         }
2880         return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id],
2881                                                 tx_pkts, nb_pkts);
2882 }
2883
2884 uint32_t
2885 rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
2886 {
2887         struct rte_eth_dev *dev;
2888
2889         if (!rte_eth_dev_is_valid_port(port_id)) {
2890                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2891                 return 0;
2892         }
2893
2894         dev = &rte_eth_devices[port_id];
2895         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, 0);
2896         return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
2897 }
2898
2899 int
2900 rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
2901 {
2902         struct rte_eth_dev *dev;
2903
2904         if (!rte_eth_dev_is_valid_port(port_id)) {
2905                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2906                 return (-ENODEV);
2907         }
2908
2909         dev = &rte_eth_devices[port_id];
2910         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
2911         return (*dev->dev_ops->rx_descriptor_done)(
2912                 dev->data->rx_queues[queue_id], offset);
2913 }
2914 #endif

int
rte_eth_dev_callback_register(uint8_t port_id,
                        enum rte_eth_event_type event,
                        rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_callback *user_cb;

        if (!cb_fn)
                return (-EINVAL);

        if (!rte_eth_dev_is_valid_port(port_id)) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-EINVAL);
        }

        dev = &rte_eth_devices[port_id];
        rte_spinlock_lock(&rte_eth_dev_cb_lock);

        TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
                if (user_cb->cb_fn == cb_fn &&
                        user_cb->cb_arg == cb_arg &&
                        user_cb->event == event) {
                        break;
                }
        }

        /* create a new callback. */
        if (user_cb == NULL && (user_cb = rte_zmalloc("INTR_USER_CALLBACK",
                        sizeof(struct rte_eth_dev_callback), 0)) != NULL) {
                user_cb->cb_fn = cb_fn;
                user_cb->cb_arg = cb_arg;
                user_cb->event = event;
                TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
        }

        rte_spinlock_unlock(&rte_eth_dev_cb_lock);
        return ((user_cb == NULL) ? -ENOMEM : 0);
}

int
rte_eth_dev_callback_unregister(uint8_t port_id,
                        enum rte_eth_event_type event,
                        rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
        int ret;
        struct rte_eth_dev *dev;
        struct rte_eth_dev_callback *cb, *next;

        if (!cb_fn)
                return (-EINVAL);

        if (!rte_eth_dev_is_valid_port(port_id)) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-EINVAL);
        }

        dev = &rte_eth_devices[port_id];
        rte_spinlock_lock(&rte_eth_dev_cb_lock);

        ret = 0;
        for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {

                next = TAILQ_NEXT(cb, next);

                if (cb->cb_fn != cb_fn || cb->event != event ||
                                (cb->cb_arg != (void *)-1 &&
                                cb->cb_arg != cb_arg))
                        continue;

                /*
                 * if this callback is not executing right now,
                 * then remove it.
                 */
                if (cb->active == 0) {
                        TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
                        rte_free(cb);
                } else {
                        ret = -EAGAIN;
                }
        }

        rte_spinlock_unlock(&rte_eth_dev_cb_lock);
        return (ret);
}

void
_rte_eth_dev_callback_process(struct rte_eth_dev *dev,
        enum rte_eth_event_type event)
{
        struct rte_eth_dev_callback *cb_lst;
        struct rte_eth_dev_callback dev_cb;

        rte_spinlock_lock(&rte_eth_dev_cb_lock);
        TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
                if (cb_lst->cb_fn == NULL || cb_lst->event != event)
                        continue;
                dev_cb = *cb_lst;
                cb_lst->active = 1;
                rte_spinlock_unlock(&rte_eth_dev_cb_lock);
                dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
                                                dev_cb.cb_arg);
                rte_spinlock_lock(&rte_eth_dev_cb_lock);
                cb_lst->active = 0;
        }
        rte_spinlock_unlock(&rte_eth_dev_cb_lock);
}
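
/*
 * Editorial sketch: how an application hooks into the callback machinery
 * above. The handler name and its body are illustrative; it only calls
 * rte_eth_link_get_nowait() to report the new link state.
 */
static void __rte_unused
example_lsc_handler(uint8_t port_id, enum rte_eth_event_type event,
                void *cb_arg)
{
        struct rte_eth_link link;

        (void)event;
        (void)cb_arg;
        rte_eth_link_get_nowait(port_id, &link);
        RTE_LOG(INFO, PMD, "port %u link %s\n", (unsigned)port_id,
                link.link_status ? "up" : "down");
}

/*
 * Registered once at init time, typically before rte_eth_dev_start():
 *
 *      rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *                      example_lsc_handler, NULL);
 */
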
#ifdef RTE_NIC_BYPASS
int
rte_eth_dev_bypass_init(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        if (!rte_eth_dev_is_valid_port(port_id)) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-ENODEV);
        }

        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP);
        (*dev->dev_ops->bypass_init)(dev);
        return 0;
}

int
rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state)
{
        struct rte_eth_dev *dev;

        if (!rte_eth_dev_is_valid_port(port_id)) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-ENODEV);
        }

        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
        (*dev->dev_ops->bypass_state_show)(dev, state);
        return 0;
}

int
rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state)
{
        struct rte_eth_dev *dev;

        if (!rte_eth_dev_is_valid_port(port_id)) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-ENODEV);
        }

        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP);
        (*dev->dev_ops->bypass_state_set)(dev, new_state);
        return 0;
}

int
rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state)
{
        struct rte_eth_dev *dev;

        if (!rte_eth_dev_is_valid_port(port_id)) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-ENODEV);
        }

        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_show, -ENOTSUP);
        (*dev->dev_ops->bypass_event_show)(dev, event, state);
        return 0;
}

int
rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state)
{
        struct rte_eth_dev *dev;

        if (!rte_eth_dev_is_valid_port(port_id)) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-ENODEV);
        }

        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP);
        (*dev->dev_ops->bypass_event_set)(dev, event, state);
        return 0;
}

int
rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout)
{
        struct rte_eth_dev *dev;

        if (!rte_eth_dev_is_valid_port(port_id)) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-ENODEV);
        }

        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP);
        (*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout);
        return 0;
}

int
rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver)
{
        struct rte_eth_dev *dev;

        if (!rte_eth_dev_is_valid_port(port_id)) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-ENODEV);
        }

        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP);
        (*dev->dev_ops->bypass_ver_show)(dev, ver);
        return 0;
}

int
rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
{
        struct rte_eth_dev *dev;

        if (!rte_eth_dev_is_valid_port(port_id)) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-ENODEV);
        }

        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP);
        (*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout);
        return 0;
}

int
rte_eth_dev_bypass_wd_reset(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        if (!rte_eth_dev_is_valid_port(port_id)) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-ENODEV);
        }

        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP);
        (*dev->dev_ops->bypass_wd_reset)(dev);
        return 0;
}
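
/*
 * Editorial sketch: a minimal probe of the bypass API above. Only compiled
 * when RTE_NIC_BYPASS is set; the function name and the logging are
 * illustrative, and error codes are simply propagated.
 */
static int __rte_unused
example_bypass_probe(uint8_t port_id)
{
        uint32_t state = 0;
        int ret;

        ret = rte_eth_dev_bypass_init(port_id);
        if (ret != 0)
                return ret;     /* -ENODEV or -ENOTSUP */
        ret = rte_eth_dev_bypass_state_show(port_id, &state);
        if (ret == 0)
                RTE_LOG(INFO, PMD, "port %u bypass state %u\n",
                        (unsigned)port_id, (unsigned)state);
        return ret;
}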
#endif

int
rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type)
{
        struct rte_eth_dev *dev;

        if (!rte_eth_dev_is_valid_port(port_id)) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return -ENODEV;
        }

        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
        return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
                                RTE_ETH_FILTER_NOP, NULL);
}

int
rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
                       enum rte_filter_op filter_op, void *arg)
{
        struct rte_eth_dev *dev;

        if (!rte_eth_dev_is_valid_port(port_id)) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return -ENODEV;
        }

        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
        return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op, arg);
}
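
/*
 * Editorial sketch: the intended calling pattern for the generic filter
 * API above - probe for support first, then issue the operation. The
 * function name is illustrative and the ADD operation is used only as an
 * example; the caller supplies the type-specific structure through arg.
 */
static int __rte_unused
example_filter_add(uint8_t port_id, enum rte_filter_type type, void *arg)
{
        int ret;

        ret = rte_eth_dev_filter_supported(port_id, type);
        if (ret < 0)
                return ret;     /* -ENODEV or -ENOTSUP */
        return rte_eth_dev_filter_ctrl(port_id, type,
                                RTE_ETH_FILTER_ADD, arg);
}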

void *
rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
                rte_rxtx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
        rte_errno = ENOTSUP;
        return NULL;
#endif
        /* check input parameters */
        if (port_id >= nb_ports || fn == NULL ||
                    queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
                rte_errno = EINVAL;
                return NULL;
        }

        struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

        if (cb == NULL) {
                rte_errno = ENOMEM;
                return NULL;
        }

        cb->fn = fn;
        cb->param = user_param;
        cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
        rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
        return cb;
}

void *
rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
                rte_rxtx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
        rte_errno = ENOTSUP;
        return NULL;
#endif
        /* check input parameters */
        if (port_id >= nb_ports || fn == NULL ||
                    queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
                rte_errno = EINVAL;
                return NULL;
        }

        struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

        if (cb == NULL) {
                rte_errno = ENOMEM;
                return NULL;
        }

        cb->fn = fn;
        cb->param = user_param;
        cb->next = rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
        rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
        return cb;
}
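
/*
 * Editorial sketch: a counting hook for the two installers above. The
 * signature follows the rte_rxtx_callback_fn typedef this file is built
 * against; the counter passed through user_param is illustrative.
 */
static uint16_t __rte_unused
example_count_cb(uint8_t port, uint16_t queue, struct rte_mbuf *pkts[],
                uint16_t nb_pkts, void *user_param)
{
        uint64_t *count = user_param;

        (void)port;
        (void)queue;
        (void)pkts;
        *count += nb_pkts;
        return nb_pkts; /* pass the burst through unmodified */
}

/*
 * Installed once per queue, e.g. on RX queue 0 of a port:
 *
 *      static uint64_t rx_count;
 *      void *cb = rte_eth_add_rx_callback(port_id, 0,
 *                      example_count_cb, &rx_count);
 */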

int
rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
                struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
        return (-ENOTSUP);
#endif
        /* Check input parameters. */
        if (port_id >= nb_ports || user_cb == NULL ||
                    queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
                return (-EINVAL);
        }

        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        struct rte_eth_rxtx_callback *cb = dev->post_rx_burst_cbs[queue_id];
        struct rte_eth_rxtx_callback *prev_cb;

        /* Reset head pointer and remove user cb if first in the list. */
        if (cb == user_cb) {
                dev->post_rx_burst_cbs[queue_id] = user_cb->next;
                return 0;
        }

        /* Remove the user cb from the callback list. */
        do {
                prev_cb = cb;
                cb = cb->next;

                if (cb == user_cb) {
                        prev_cb->next = user_cb->next;
                        return 0;
                }

        } while (cb != NULL);

        /* Callback wasn't found. */
        return (-EINVAL);
}

int
rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
                struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
        return (-ENOTSUP);
#endif
        /* Check input parameters. */
        if (port_id >= nb_ports || user_cb == NULL ||
                    queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
                return (-EINVAL);
        }

        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];
        struct rte_eth_rxtx_callback *prev_cb;

        /* Reset head pointer and remove user cb if first in the list. */
        if (cb == user_cb) {
                dev->pre_tx_burst_cbs[queue_id] = user_cb->next;
                return 0;
        }

        /* Remove the user cb from the callback list. */
        do {
                prev_cb = cb;
                cb = cb->next;

                if (cb == user_cb) {
                        prev_cb->next = user_cb->next;
                        return 0;
                }

        } while (cb != NULL);

        /* Callback wasn't found. */
        return (-EINVAL);
}
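
/*
 * Editorial sketch: removal takes the pointer returned by the matching add
 * call as its handle. As the code above shows, the unlinked structure is
 * not freed here; releasing it safely, once no rx/tx burst can still be
 * walking the list, is left to the caller. The wrapper name is illustrative.
 */
static int __rte_unused
example_uninstall_rx_cb(uint8_t port_id, uint16_t queue_id, void *cb)
{
        return rte_eth_remove_rx_callback(port_id, queue_id,
                        (struct rte_eth_rxtx_callback *)cb);
}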