ethdev: reset whole dev info structure before filling
[dpdk.git] / lib / librte_ether / rte_ethdev.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/types.h>
35 #include <sys/queue.h>
36 #include <ctype.h>
37 #include <stdio.h>
38 #include <stdlib.h>
39 #include <string.h>
40 #include <stdarg.h>
41 #include <errno.h>
42 #include <stdint.h>
43 #include <inttypes.h>
44 #include <netinet/in.h>
45
46 #include <rte_byteorder.h>
47 #include <rte_log.h>
48 #include <rte_debug.h>
49 #include <rte_interrupts.h>
50 #include <rte_pci.h>
51 #include <rte_memory.h>
52 #include <rte_memcpy.h>
53 #include <rte_memzone.h>
54 #include <rte_launch.h>
55 #include <rte_tailq.h>
56 #include <rte_eal.h>
57 #include <rte_per_lcore.h>
58 #include <rte_lcore.h>
59 #include <rte_atomic.h>
60 #include <rte_branch_prediction.h>
61 #include <rte_common.h>
62 #include <rte_ring.h>
63 #include <rte_mempool.h>
64 #include <rte_malloc.h>
65 #include <rte_mbuf.h>
66 #include <rte_errno.h>
67 #include <rte_spinlock.h>
68 #include <rte_string_fns.h>
69
70 #include "rte_ether.h"
71 #include "rte_ethdev.h"
72
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
/* Debug trace prefixed with the calling function's name; compiled to
 * nothing unless RTE_LIBRTE_ETHDEV_DEBUG is defined. */
#define PMD_DEBUG_TRACE(fmt, args...) do {                        \
		RTE_LOG(ERR, PMD, "%s: " fmt, __func__, ## args); \
	} while (0)
#else
#define PMD_DEBUG_TRACE(fmt, args...)
#endif

/* Macros restricting control-path functions to the primary process only */
#define PROC_PRIMARY_OR_ERR_RET(retval) do { \
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
		PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
		return (retval); \
	} \
} while(0)
/* Same guard for functions returning void. */
#define PROC_PRIMARY_OR_RET() do { \
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
		PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
		return; \
	} \
} while(0)

/* Macros to check for invalid function pointers in dev_ops structure */
#define FUNC_PTR_OR_ERR_RET(func, retval) do { \
	if ((func) == NULL) { \
		PMD_DEBUG_TRACE("Function not supported\n"); \
		return (retval); \
	} \
} while(0)
/* Same check for functions returning void. */
#define FUNC_PTR_OR_RET(func) do { \
	if ((func) == NULL) { \
		PMD_DEBUG_TRACE("Function not supported\n"); \
		return; \
	} \
} while(0)
108
/* Name of the memzone shared between primary and secondary processes. */
static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
/* Per-process table of Ethernet devices, indexed by port_id. */
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
/* Shared per-port data, mapped on demand by rte_eth_dev_data_alloc(). */
static struct rte_eth_dev_data *rte_eth_dev_data = NULL;
/* Number of ports allocated so far in this process. */
static uint8_t nb_ports = 0;

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and its offset in stats structure  */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE]; /**< exported statistic name */
	unsigned offset;                     /**< byte offset into rte_eth_stats */
};

/* Generic per-device statistics exported via the xstats API. */
static struct rte_eth_xstats_name_off rte_stats_strings[] = {
	 {"rx_packets", offsetof(struct rte_eth_stats, ipackets)},
	 {"tx_packets", offsetof(struct rte_eth_stats, opackets)},
	 {"rx_bytes", offsetof(struct rte_eth_stats, ibytes)},
	 {"tx_bytes", offsetof(struct rte_eth_stats, obytes)},
	 {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	 {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
	 {"rx_crc_errors", offsetof(struct rte_eth_stats, ibadcrc)},
	 {"rx_bad_length_errors", offsetof(struct rte_eth_stats, ibadlen)},
	 {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	 {"alloc_rx_buff_failed", offsetof(struct rte_eth_stats, rx_nombuf)},
	 {"fdir_match", offsetof(struct rte_eth_stats, fdirmatch)},
	 {"fdir_miss", offsetof(struct rte_eth_stats, fdirmiss)},
	 {"tx_flow_control_xon", offsetof(struct rte_eth_stats, tx_pause_xon)},
	 {"rx_flow_control_xon", offsetof(struct rte_eth_stats, rx_pause_xon)},
	 {"tx_flow_control_xoff", offsetof(struct rte_eth_stats, tx_pause_xoff)},
	 {"rx_flow_control_xoff", offsetof(struct rte_eth_stats, rx_pause_xoff)},
};
#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))

/* Per-RX-queue statistics exported via the xstats API. */
static struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
	{"rx_packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"rx_bytes", offsetof(struct rte_eth_stats, q_ibytes)},
};
#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
		sizeof(rte_rxq_stats_strings[0]))

/* Per-TX-queue statistics exported via the xstats API. */
static struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
	{"tx_packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"tx_bytes", offsetof(struct rte_eth_stats, q_obytes)},
	{"tx_errors", offsetof(struct rte_eth_stats, q_errors)},
};
#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
		sizeof(rte_txq_stats_strings[0]))


/**
 * The user application callback description.
 *
 * It contains callback address to be registered by user application,
 * the pointer to the parameters for callback, and the event type.
 */
struct rte_eth_dev_callback {
	TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
	rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
	void *cb_arg;                           /**< Parameter for callback */
	enum rte_eth_event_type event;          /**< Interrupt event type */
	uint32_t active;                        /**< Callback is executing */
};

/* NOTE(review): names suggest these tag queue-to-stats-register mappings
 * as TX vs RX; confirm against the call sites (not visible in this chunk). */
enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};
177
178 static inline void
179 rte_eth_dev_data_alloc(void)
180 {
181         const unsigned flags = 0;
182         const struct rte_memzone *mz;
183
184         if (rte_eal_process_type() == RTE_PROC_PRIMARY){
185                 mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
186                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
187                                 rte_socket_id(), flags);
188         } else
189                 mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
190         if (mz == NULL)
191                 rte_panic("Cannot allocate memzone for ethernet port data\n");
192
193         rte_eth_dev_data = mz->addr;
194         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
195                 memset(rte_eth_dev_data, 0,
196                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
197 }
198
199 static struct rte_eth_dev *
200 rte_eth_dev_allocated(const char *name)
201 {
202         unsigned i;
203
204         for (i = 0; i < nb_ports; i++) {
205                 if (strcmp(rte_eth_devices[i].data->name, name) == 0)
206                         return &rte_eth_devices[i];
207         }
208         return NULL;
209 }
210
211 struct rte_eth_dev *
212 rte_eth_dev_allocate(const char *name)
213 {
214         struct rte_eth_dev *eth_dev;
215
216         if (nb_ports == RTE_MAX_ETHPORTS) {
217                 PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
218                 return NULL;
219         }
220
221         if (rte_eth_dev_data == NULL)
222                 rte_eth_dev_data_alloc();
223
224         if (rte_eth_dev_allocated(name) != NULL) {
225                 PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n", name);
226                 return NULL;
227         }
228
229         eth_dev = &rte_eth_devices[nb_ports];
230         eth_dev->data = &rte_eth_dev_data[nb_ports];
231         snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
232         eth_dev->data->port_id = nb_ports++;
233         return eth_dev;
234 }
235
/*
 * PCI probe callback for Ethernet drivers: allocate an rte_eth_dev for
 * the probed device, set up its shared data, and invoke the PMD's
 * eth_dev_init hook.
 *
 * Returns 0 on success, -ENOMEM when no port slot is available, or the
 * PMD's (negative) return value on init failure.  On failure the port
 * slot taken from rte_eth_dev_allocate() is rolled back.
 */
static int
rte_eth_dev_init(struct rte_pci_driver *pci_drv,
		 struct rte_pci_device *pci_dev)
{
	struct eth_driver    *eth_drv;
	struct rte_eth_dev *eth_dev;
	char ethdev_name[RTE_ETH_NAME_MAX_LEN];

	int diag;

	/* pci_drv is the first member of struct eth_driver, so this
	 * downcast recovers the enclosing Ethernet driver. */
	eth_drv = (struct eth_driver *)pci_drv;

	/* Create unique Ethernet device name using PCI address */
	snprintf(ethdev_name, RTE_ETH_NAME_MAX_LEN, "%d:%d.%d",
			pci_dev->addr.bus, pci_dev->addr.devid, pci_dev->addr.function);

	eth_dev = rte_eth_dev_allocate(ethdev_name);
	if (eth_dev == NULL)
		return -ENOMEM;

	/* Only the primary process allocates the PMD-private area;
	 * secondaries reach it through the shared eth_dev->data. */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY){
		eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
				  eth_drv->dev_private_size,
				  CACHE_LINE_SIZE);
		if (eth_dev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private port data\n");
	}
	eth_dev->pci_dev = pci_dev;
	eth_dev->driver = eth_drv;
	eth_dev->data->rx_mbuf_alloc_failed = 0;

	/* init user callbacks */
	TAILQ_INIT(&(eth_dev->callbacks));

	/*
	 * Set the default MTU.
	 */
	eth_dev->data->mtu = ETHER_MTU;

	/* Invoke PMD device initialization function */
	diag = (*eth_drv->eth_dev_init)(eth_drv, eth_dev);
	if (diag == 0)
		return (0);

	PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%u device_id=0x%x)"
			" failed\n", pci_drv->name,
			(unsigned) pci_dev->id.vendor_id,
			(unsigned) pci_dev->id.device_id);
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);
	/* Undo the slot reservation made by rte_eth_dev_allocate(). */
	nb_ports--;
	return diag;
}
289
290 /**
291  * Register an Ethernet [Poll Mode] driver.
292  *
293  * Function invoked by the initialization function of an Ethernet driver
294  * to simultaneously register itself as a PCI driver and as an Ethernet
295  * Poll Mode Driver.
296  * Invokes the rte_eal_pci_register() function to register the *pci_drv*
297  * structure embedded in the *eth_drv* structure, after having stored the
298  * address of the rte_eth_dev_init() function in the *devinit* field of
299  * the *pci_drv* structure.
300  * During the PCI probing phase, the rte_eth_dev_init() function is
301  * invoked for each PCI [Ethernet device] matching the embedded PCI
302  * identifiers provided by the driver.
303  */
void
rte_eth_driver_register(struct eth_driver *eth_drv)
{
	/* The probe hook must be in place before the PCI driver is
	 * registered, since probing may run during registration. */
	eth_drv->pci_drv.devinit = rte_eth_dev_init;
	rte_eal_pci_register(&eth_drv->pci_drv);
}
310
311 int
312 rte_eth_dev_socket_id(uint8_t port_id)
313 {
314         if (port_id >= nb_ports)
315                 return -1;
316         return rte_eth_devices[port_id].pci_dev->numa_node;
317 }
318
319 uint8_t
320 rte_eth_dev_count(void)
321 {
322         return (nb_ports);
323 }
324
325 static int
326 rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
327 {
328         uint16_t old_nb_queues = dev->data->nb_rx_queues;
329         void **rxq;
330         unsigned i;
331
332         if (dev->data->rx_queues == NULL) { /* first time configuration */
333                 dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
334                                 sizeof(dev->data->rx_queues[0]) * nb_queues,
335                                 CACHE_LINE_SIZE);
336                 if (dev->data->rx_queues == NULL) {
337                         dev->data->nb_rx_queues = 0;
338                         return -(ENOMEM);
339                 }
340         } else { /* re-configure */
341                 FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
342
343                 rxq = dev->data->rx_queues;
344
345                 for (i = nb_queues; i < old_nb_queues; i++)
346                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
347                 rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
348                                 CACHE_LINE_SIZE);
349                 if (rxq == NULL)
350                         return -(ENOMEM);
351
352                 if (nb_queues > old_nb_queues)
353                         memset(rxq + old_nb_queues, 0,
354                                 sizeof(rxq[0]) * (nb_queues - old_nb_queues));
355
356                 dev->data->rx_queues = rxq;
357
358         }
359         dev->data->nb_rx_queues = nb_queues;
360         return (0);
361 }
362
363 int
364 rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
365 {
366         struct rte_eth_dev *dev;
367
368         /* This function is only safe when called from the primary process
369          * in a multi-process setup*/
370         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
371
372         if (port_id >= nb_ports) {
373                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
374                 return -EINVAL;
375         }
376
377         dev = &rte_eth_devices[port_id];
378         if (rx_queue_id >= dev->data->nb_rx_queues) {
379                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
380                 return -EINVAL;
381         }
382
383         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
384
385         return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
386
387 }
388
389 int
390 rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
391 {
392         struct rte_eth_dev *dev;
393
394         /* This function is only safe when called from the primary process
395          * in a multi-process setup*/
396         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
397
398         if (port_id >= nb_ports) {
399                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
400                 return -EINVAL;
401         }
402
403         dev = &rte_eth_devices[port_id];
404         if (rx_queue_id >= dev->data->nb_rx_queues) {
405                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
406                 return -EINVAL;
407         }
408
409         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
410
411         return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
412
413 }
414
415 int
416 rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
417 {
418         struct rte_eth_dev *dev;
419
420         /* This function is only safe when called from the primary process
421          * in a multi-process setup*/
422         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
423
424         if (port_id >= nb_ports) {
425                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
426                 return -EINVAL;
427         }
428
429         dev = &rte_eth_devices[port_id];
430         if (tx_queue_id >= dev->data->nb_tx_queues) {
431                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
432                 return -EINVAL;
433         }
434
435         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
436
437         return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
438
439 }
440
441 int
442 rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
443 {
444         struct rte_eth_dev *dev;
445
446         /* This function is only safe when called from the primary process
447          * in a multi-process setup*/
448         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
449
450         if (port_id >= nb_ports) {
451                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
452                 return -EINVAL;
453         }
454
455         dev = &rte_eth_devices[port_id];
456         if (tx_queue_id >= dev->data->nb_tx_queues) {
457                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
458                 return -EINVAL;
459         }
460
461         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
462
463         return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
464
465 }
466
467 static int
468 rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
469 {
470         uint16_t old_nb_queues = dev->data->nb_tx_queues;
471         void **txq;
472         unsigned i;
473
474         if (dev->data->tx_queues == NULL) { /* first time configuration */
475                 dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
476                                 sizeof(dev->data->tx_queues[0]) * nb_queues,
477                                 CACHE_LINE_SIZE);
478                 if (dev->data->tx_queues == NULL) {
479                         dev->data->nb_tx_queues = 0;
480                         return -(ENOMEM);
481                 }
482         } else { /* re-configure */
483                 FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
484
485                 txq = dev->data->tx_queues;
486
487                 for (i = nb_queues; i < old_nb_queues; i++)
488                         (*dev->dev_ops->tx_queue_release)(txq[i]);
489                 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
490                                 CACHE_LINE_SIZE);
491                 if (txq == NULL)
492                         return -(ENOMEM);
493
494                 if (nb_queues > old_nb_queues)
495                         memset(txq + old_nb_queues, 0,
496                                 sizeof(txq[0]) * (nb_queues - old_nb_queues));
497
498                 dev->data->tx_queues = txq;
499
500         }
501         dev->data->nb_tx_queues = nb_queues;
502         return (0);
503 }
504
/*
 * Validate the requested multi-queue (RSS/DCB/VMDQ) configuration and
 * queue counts for a port before it is configured.
 *
 * With SR-IOV active, only VMDQ-style modes are permitted and the
 * default mode is forced to *_VMDQ_ONLY; otherwise VMDQ+DCB and plain
 * DCB configurations are checked for the exact queue/pool/TC counts
 * they require.  May rewrite dev->data->dev_conf mq_mode defaults and
 * clamp nb_q_per_pool as a side effect.  Returns 0 if the combination
 * is acceptable, -EINVAL otherwise.
 *
 * NOTE(review): port_id is assumed already validated by the caller —
 * it is used to index rte_eth_devices[] without a range check.
 */
static int
rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];

	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
		/* check multi-queue mode */
		if ((dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ||
		    (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) ||
		    (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB_RSS) ||
		    (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB)) {
			/* SRIOV only works in VMDq enable mode */
			PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
					" SRIOV active, "
					"wrong VMDQ mq_mode rx %u tx %u\n",
					port_id,
					dev_conf->rxmode.mq_mode,
					dev_conf->txmode.mq_mode);
			return (-EINVAL);
		}

		switch (dev_conf->rxmode.mq_mode) {
		case ETH_MQ_RX_VMDQ_RSS:
		case ETH_MQ_RX_VMDQ_DCB:
		case ETH_MQ_RX_VMDQ_DCB_RSS:
			/* DCB/RSS VMDQ in SRIOV mode, not implement yet */
			PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
					" SRIOV active, "
					"unsupported VMDQ mq_mode rx %u\n",
					port_id, dev_conf->rxmode.mq_mode);
			return (-EINVAL);
		default: /* ETH_MQ_RX_VMDQ_ONLY or ETH_MQ_RX_NONE */
			/* if nothing mq mode configure, use default scheme */
			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
			if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
				RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
			break;
		}

		switch (dev_conf->txmode.mq_mode) {
		case ETH_MQ_TX_VMDQ_DCB:
			/* DCB VMDQ in SRIOV mode, not implement yet */
			PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
					" SRIOV active, "
					"unsupported VMDQ mq_mode tx %u\n",
					port_id, dev_conf->txmode.mq_mode);
			return (-EINVAL);
		default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
			/* if nothing mq mode configure, use default scheme */
			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
			if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
				RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
			break;
		}

		/* check valid queue number */
		if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
		    (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
			PMD_DEBUG_TRACE("ethdev port_id=%d SRIOV active, "
				    "queue number must less equal to %d\n",
					port_id, RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
			return (-EINVAL);
		}
	} else {
		/* For vmdq+dcb mode check our configuration before we go further */
		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
			const struct rte_eth_vmdq_dcb_conf *conf;

			/* VMDQ+DCB RX requires the full fixed queue count. */
			if (nb_rx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
				PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_rx_q "
						"!= %d\n",
						port_id, ETH_VMDQ_DCB_NUM_QUEUES);
				return (-EINVAL);
			}
			conf = &(dev_conf->rx_adv_conf.vmdq_dcb_conf);
			if (! (conf->nb_queue_pools == ETH_16_POOLS ||
			       conf->nb_queue_pools == ETH_32_POOLS)) {
				PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
						"nb_queue_pools must be %d or %d\n",
						port_id, ETH_16_POOLS, ETH_32_POOLS);
				return (-EINVAL);
			}
		}
		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
			const struct rte_eth_vmdq_dcb_tx_conf *conf;

			/* VMDQ+DCB TX requires the full fixed queue count. */
			if (nb_tx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
				PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_tx_q "
						"!= %d\n",
						port_id, ETH_VMDQ_DCB_NUM_QUEUES);
				return (-EINVAL);
			}
			conf = &(dev_conf->tx_adv_conf.vmdq_dcb_tx_conf);
			if (! (conf->nb_queue_pools == ETH_16_POOLS ||
			       conf->nb_queue_pools == ETH_32_POOLS)) {
				PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
						"nb_queue_pools != %d or nb_queue_pools "
						"!= %d\n",
						port_id, ETH_16_POOLS, ETH_32_POOLS);
				return (-EINVAL);
			}
		}

		/* For DCB mode check our configuration before we go further */
		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
			const struct rte_eth_dcb_rx_conf *conf;

			/* Plain DCB RX also requires a fixed queue count. */
			if (nb_rx_q != ETH_DCB_NUM_QUEUES) {
				PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_rx_q "
						"!= %d\n",
						port_id, ETH_DCB_NUM_QUEUES);
				return (-EINVAL);
			}
			conf = &(dev_conf->rx_adv_conf.dcb_rx_conf);
			if (! (conf->nb_tcs == ETH_4_TCS ||
			       conf->nb_tcs == ETH_8_TCS)) {
				PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
						"nb_tcs != %d or nb_tcs "
						"!= %d\n",
						port_id, ETH_4_TCS, ETH_8_TCS);
				return (-EINVAL);
			}
		}

		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
			const struct rte_eth_dcb_tx_conf *conf;

			/* Plain DCB TX also requires a fixed queue count. */
			if (nb_tx_q != ETH_DCB_NUM_QUEUES) {
				PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_tx_q "
						"!= %d\n",
						port_id, ETH_DCB_NUM_QUEUES);
				return (-EINVAL);
			}
			conf = &(dev_conf->tx_adv_conf.dcb_tx_conf);
			if (! (conf->nb_tcs == ETH_4_TCS ||
			       conf->nb_tcs == ETH_8_TCS)) {
				PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
						"nb_tcs != %d or nb_tcs "
						"!= %d\n",
						port_id, ETH_4_TCS, ETH_8_TCS);
				return (-EINVAL);
			}
		}
	}
	return 0;
}
652
653 int
654 rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
655                       const struct rte_eth_conf *dev_conf)
656 {
657         struct rte_eth_dev *dev;
658         struct rte_eth_dev_info dev_info;
659         int diag;
660
661         /* This function is only safe when called from the primary process
662          * in a multi-process setup*/
663         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
664
665         if (port_id >= nb_ports || port_id >= RTE_MAX_ETHPORTS) {
666                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
667                 return (-EINVAL);
668         }
669         dev = &rte_eth_devices[port_id];
670
671         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
672         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
673
674         if (dev->data->dev_started) {
675                 PMD_DEBUG_TRACE(
676                     "port %d must be stopped to allow configuration\n", port_id);
677                 return (-EBUSY);
678         }
679
680         /*
681          * Check that the numbers of RX and TX queues are not greater
682          * than the maximum number of RX and TX queues supported by the
683          * configured device.
684          */
685         (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
686         if (nb_rx_q > dev_info.max_rx_queues) {
687                 PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
688                                 port_id, nb_rx_q, dev_info.max_rx_queues);
689                 return (-EINVAL);
690         }
691         if (nb_rx_q == 0) {
692                 PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_q == 0\n", port_id);
693                 return (-EINVAL);
694         }
695
696         if (nb_tx_q > dev_info.max_tx_queues) {
697                 PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
698                                 port_id, nb_tx_q, dev_info.max_tx_queues);
699                 return (-EINVAL);
700         }
701         if (nb_tx_q == 0) {
702                 PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_q == 0\n", port_id);
703                 return (-EINVAL);
704         }
705
706         /* Copy the dev_conf parameter into the dev structure */
707         memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
708
709         /*
710          * If link state interrupt is enabled, check that the
711          * device supports it.
712          */
713         if (dev_conf->intr_conf.lsc == 1) {
714                 const struct rte_pci_driver *pci_drv = &dev->driver->pci_drv;
715
716                 if (!(pci_drv->drv_flags & RTE_PCI_DRV_INTR_LSC)) {
717                         PMD_DEBUG_TRACE("driver %s does not support lsc\n",
718                                         pci_drv->name);
719                         return (-EINVAL);
720                 }
721         }
722
723         /*
724          * If jumbo frames are enabled, check that the maximum RX packet
725          * length is supported by the configured device.
726          */
727         if (dev_conf->rxmode.jumbo_frame == 1) {
728                 if (dev_conf->rxmode.max_rx_pkt_len >
729                     dev_info.max_rx_pktlen) {
730                         PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
731                                 " > max valid value %u\n",
732                                 port_id,
733                                 (unsigned)dev_conf->rxmode.max_rx_pkt_len,
734                                 (unsigned)dev_info.max_rx_pktlen);
735                         return (-EINVAL);
736                 }
737                 else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
738                         PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
739                                 " < min valid value %u\n",
740                                 port_id,
741                                 (unsigned)dev_conf->rxmode.max_rx_pkt_len,
742                                 (unsigned)ETHER_MIN_LEN);
743                         return (-EINVAL);
744                 }
745         } else {
746                 if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
747                         dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
748                         /* Use default value */
749                         dev->data->dev_conf.rxmode.max_rx_pkt_len =
750                                                         ETHER_MAX_LEN;
751         }
752
753         /* multipe queue mode checking */
754         diag = rte_eth_dev_check_mq_mode(port_id, nb_rx_q, nb_tx_q, dev_conf);
755         if (diag != 0) {
756                 PMD_DEBUG_TRACE("port%d rte_eth_dev_check_mq_mode = %d\n",
757                                 port_id, diag);
758                 return diag;
759         }
760
761         /*
762          * Setup new number of RX/TX queues and reconfigure device.
763          */
764         diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
765         if (diag != 0) {
766                 PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
767                                 port_id, diag);
768                 return diag;
769         }
770
771         diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
772         if (diag != 0) {
773                 PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
774                                 port_id, diag);
775                 rte_eth_dev_rx_queue_config(dev, 0);
776                 return diag;
777         }
778
779         diag = (*dev->dev_ops->dev_configure)(dev);
780         if (diag != 0) {
781                 PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
782                                 port_id, diag);
783                 rte_eth_dev_rx_queue_config(dev, 0);
784                 rte_eth_dev_tx_queue_config(dev, 0);
785                 return diag;
786         }
787
788         return 0;
789 }
790
/*
 * Replay the software-cached port configuration into the hardware after a
 * (re)start: unicast MAC addresses, promiscuous mode and all-multicast mode.
 * Best effort: a PMD without a mac_addr_add callback is logged and the MAC
 * replay is skipped, but no error is reported to the caller.
 */
static void
rte_eth_dev_config_restore(uint8_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct ether_addr addr;
	uint16_t i;
	uint32_t pool = 0;

	dev = &rte_eth_devices[port_id];

	rte_eth_dev_info_get(port_id, &dev_info);

	/* In SR-IOV mode, addresses go to the default VMDq pool. */
	if (RTE_ETH_DEV_SRIOV(dev).active)
		pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx;

	/* replay MAC address configuration */
	for (i = 0; i < dev_info.max_mac_addrs; i++) {
		addr = dev->data->mac_addrs[i];

		/* skip zero address */
		if (is_zero_ether_addr(&addr))
			continue;

		/* add address to the hardware */
		if  (*dev->dev_ops->mac_addr_add)
			(*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool);
		else {
			PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
					port_id);
			/* exit the loop but not return an error */
			break;
		}
	}

	/* replay promiscuous configuration */
	if (rte_eth_promiscuous_get(port_id) == 1)
		rte_eth_promiscuous_enable(port_id);
	else if (rte_eth_promiscuous_get(port_id) == 0)
		rte_eth_promiscuous_disable(port_id);

	/* replay allmulticast configuration */
	if (rte_eth_allmulticast_get(port_id) == 1)
		rte_eth_allmulticast_enable(port_id);
	else if (rte_eth_allmulticast_get(port_id) == 0)
		rte_eth_allmulticast_disable(port_id);
}
838
839 int
840 rte_eth_dev_start(uint8_t port_id)
841 {
842         struct rte_eth_dev *dev;
843         int diag;
844
845         /* This function is only safe when called from the primary process
846          * in a multi-process setup*/
847         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
848
849         if (port_id >= nb_ports) {
850                 PMD_DEBUG_TRACE("Invalid port_id=%" PRIu8 "\n", port_id);
851                 return (-EINVAL);
852         }
853         dev = &rte_eth_devices[port_id];
854
855         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
856
857         if (dev->data->dev_started != 0) {
858                 PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
859                         " already started\n",
860                         port_id);
861                 return (0);
862         }
863
864         diag = (*dev->dev_ops->dev_start)(dev);
865         if (diag == 0)
866                 dev->data->dev_started = 1;
867         else
868                 return diag;
869
870         rte_eth_dev_config_restore(port_id);
871
872         return 0;
873 }
874
875 void
876 rte_eth_dev_stop(uint8_t port_id)
877 {
878         struct rte_eth_dev *dev;
879
880         /* This function is only safe when called from the primary process
881          * in a multi-process setup*/
882         PROC_PRIMARY_OR_RET();
883
884         if (port_id >= nb_ports) {
885                 PMD_DEBUG_TRACE("Invalid port_id=%" PRIu8 "\n", port_id);
886                 return;
887         }
888         dev = &rte_eth_devices[port_id];
889
890         FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
891
892         if (dev->data->dev_started == 0) {
893                 PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
894                         " already stopped\n",
895                         port_id);
896                 return;
897         }
898
899         dev->data->dev_started = 0;
900         (*dev->dev_ops->dev_stop)(dev);
901 }
902
903 int
904 rte_eth_dev_set_link_up(uint8_t port_id)
905 {
906         struct rte_eth_dev *dev;
907
908         /* This function is only safe when called from the primary process
909          * in a multi-process setup*/
910         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
911
912         if (port_id >= nb_ports) {
913                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
914                 return -EINVAL;
915         }
916         dev = &rte_eth_devices[port_id];
917
918         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
919         return (*dev->dev_ops->dev_set_link_up)(dev);
920 }
921
922 int
923 rte_eth_dev_set_link_down(uint8_t port_id)
924 {
925         struct rte_eth_dev *dev;
926
927         /* This function is only safe when called from the primary process
928          * in a multi-process setup*/
929         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
930
931         if (port_id >= nb_ports) {
932                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
933                 return -EINVAL;
934         }
935         dev = &rte_eth_devices[port_id];
936
937         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
938         return (*dev->dev_ops->dev_set_link_down)(dev);
939 }
940
941 void
942 rte_eth_dev_close(uint8_t port_id)
943 {
944         struct rte_eth_dev *dev;
945
946         /* This function is only safe when called from the primary process
947          * in a multi-process setup*/
948         PROC_PRIMARY_OR_RET();
949
950         if (port_id >= nb_ports) {
951                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
952                 return;
953         }
954
955         dev = &rte_eth_devices[port_id];
956
957         FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
958         dev->data->dev_started = 0;
959         (*dev->dev_ops->dev_close)(dev);
960 }
961
962 int
963 rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
964                        uint16_t nb_rx_desc, unsigned int socket_id,
965                        const struct rte_eth_rxconf *rx_conf,
966                        struct rte_mempool *mp)
967 {
968         int ret;
969         uint32_t mbp_buf_size;
970         struct rte_eth_dev *dev;
971         struct rte_pktmbuf_pool_private *mbp_priv;
972         struct rte_eth_dev_info dev_info;
973
974         /* This function is only safe when called from the primary process
975          * in a multi-process setup*/
976         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
977
978         if (port_id >= nb_ports) {
979                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
980                 return (-EINVAL);
981         }
982         dev = &rte_eth_devices[port_id];
983         if (rx_queue_id >= dev->data->nb_rx_queues) {
984                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
985                 return (-EINVAL);
986         }
987
988         if (dev->data->dev_started) {
989                 PMD_DEBUG_TRACE(
990                     "port %d must be stopped to allow configuration\n", port_id);
991                 return -EBUSY;
992         }
993
994         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
995         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
996
997         /*
998          * Check the size of the mbuf data buffer.
999          * This value must be provided in the private data of the memory pool.
1000          * First check that the memory pool has a valid private data.
1001          */
1002         (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
1003         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
1004                 PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
1005                                 mp->name, (int) mp->private_data_size,
1006                                 (int) sizeof(struct rte_pktmbuf_pool_private));
1007                 return (-ENOSPC);
1008         }
1009         mbp_priv = rte_mempool_get_priv(mp);
1010         mbp_buf_size = mbp_priv->mbuf_data_room_size;
1011
1012         if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
1013                 PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
1014                                 "(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
1015                                 "=%d)\n",
1016                                 mp->name,
1017                                 (int)mbp_buf_size,
1018                                 (int)(RTE_PKTMBUF_HEADROOM +
1019                                       dev_info.min_rx_bufsize),
1020                                 (int)RTE_PKTMBUF_HEADROOM,
1021                                 (int)dev_info.min_rx_bufsize);
1022                 return (-EINVAL);
1023         }
1024
1025         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
1026                                               socket_id, rx_conf, mp);
1027         if (!ret) {
1028                 if (!dev->data->min_rx_buf_size ||
1029                     dev->data->min_rx_buf_size > mbp_buf_size)
1030                         dev->data->min_rx_buf_size = mbp_buf_size;
1031         }
1032
1033         return ret;
1034 }
1035
1036 int
1037 rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
1038                        uint16_t nb_tx_desc, unsigned int socket_id,
1039                        const struct rte_eth_txconf *tx_conf)
1040 {
1041         struct rte_eth_dev *dev;
1042
1043         /* This function is only safe when called from the primary process
1044          * in a multi-process setup*/
1045         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
1046
1047         if (port_id >= RTE_MAX_ETHPORTS || port_id >= nb_ports) {
1048                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1049                 return (-EINVAL);
1050         }
1051         dev = &rte_eth_devices[port_id];
1052         if (tx_queue_id >= dev->data->nb_tx_queues) {
1053                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
1054                 return (-EINVAL);
1055         }
1056
1057         if (dev->data->dev_started) {
1058                 PMD_DEBUG_TRACE(
1059                     "port %d must be stopped to allow configuration\n", port_id);
1060                 return -EBUSY;
1061         }
1062
1063         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
1064         return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
1065                                                socket_id, tx_conf);
1066 }
1067
1068 void
1069 rte_eth_promiscuous_enable(uint8_t port_id)
1070 {
1071         struct rte_eth_dev *dev;
1072
1073         if (port_id >= nb_ports) {
1074                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1075                 return;
1076         }
1077         dev = &rte_eth_devices[port_id];
1078
1079         FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
1080         (*dev->dev_ops->promiscuous_enable)(dev);
1081         dev->data->promiscuous = 1;
1082 }
1083
1084 void
1085 rte_eth_promiscuous_disable(uint8_t port_id)
1086 {
1087         struct rte_eth_dev *dev;
1088
1089         if (port_id >= nb_ports) {
1090                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1091                 return;
1092         }
1093         dev = &rte_eth_devices[port_id];
1094
1095         FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
1096         dev->data->promiscuous = 0;
1097         (*dev->dev_ops->promiscuous_disable)(dev);
1098 }
1099
1100 int
1101 rte_eth_promiscuous_get(uint8_t port_id)
1102 {
1103         struct rte_eth_dev *dev;
1104
1105         if (port_id >= nb_ports) {
1106                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1107                 return -1;
1108         }
1109
1110         dev = &rte_eth_devices[port_id];
1111         return dev->data->promiscuous;
1112 }
1113
1114 void
1115 rte_eth_allmulticast_enable(uint8_t port_id)
1116 {
1117         struct rte_eth_dev *dev;
1118
1119         if (port_id >= nb_ports) {
1120                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1121                 return;
1122         }
1123         dev = &rte_eth_devices[port_id];
1124
1125         FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
1126         (*dev->dev_ops->allmulticast_enable)(dev);
1127         dev->data->all_multicast = 1;
1128 }
1129
1130 void
1131 rte_eth_allmulticast_disable(uint8_t port_id)
1132 {
1133         struct rte_eth_dev *dev;
1134
1135         if (port_id >= nb_ports) {
1136                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1137                 return;
1138         }
1139         dev = &rte_eth_devices[port_id];
1140
1141         FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
1142         dev->data->all_multicast = 0;
1143         (*dev->dev_ops->allmulticast_disable)(dev);
1144 }
1145
1146 int
1147 rte_eth_allmulticast_get(uint8_t port_id)
1148 {
1149         struct rte_eth_dev *dev;
1150
1151         if (port_id >= nb_ports) {
1152                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1153                 return -1;
1154         }
1155
1156         dev = &rte_eth_devices[port_id];
1157         return dev->data->all_multicast;
1158 }
1159
/*
 * Atomically snapshot the link status from dev->data->dev_link into *link
 * using a 64-bit compare-and-set on the destination.
 * Returns 0 on success, -1 if the CAS failed (destination was modified
 * concurrently between the read of *dst and the swap).
 * NOTE(review): assumes struct rte_eth_link fits in exactly 64 bits and is
 * suitably aligned for the cast — confirm against rte_ethdev.h.
 */
static inline int
rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
1173
1174 void
1175 rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
1176 {
1177         struct rte_eth_dev *dev;
1178
1179         if (port_id >= nb_ports) {
1180                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1181                 return;
1182         }
1183         dev = &rte_eth_devices[port_id];
1184
1185         if (dev->data->dev_conf.intr_conf.lsc != 0)
1186                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1187         else {
1188                 FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1189                 (*dev->dev_ops->link_update)(dev, 1);
1190                 *eth_link = dev->data->dev_link;
1191         }
1192 }
1193
1194 void
1195 rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
1196 {
1197         struct rte_eth_dev *dev;
1198
1199         if (port_id >= nb_ports) {
1200                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1201                 return;
1202         }
1203         dev = &rte_eth_devices[port_id];
1204
1205         if (dev->data->dev_conf.intr_conf.lsc != 0)
1206                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1207         else {
1208                 FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1209                 (*dev->dev_ops->link_update)(dev, 0);
1210                 *eth_link = dev->data->dev_link;
1211         }
1212 }
1213
1214 void
1215 rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
1216 {
1217         struct rte_eth_dev *dev;
1218
1219         if (port_id >= nb_ports) {
1220                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1221                 return;
1222         }
1223         dev = &rte_eth_devices[port_id];
1224         memset(stats, 0, sizeof(*stats));
1225
1226         FUNC_PTR_OR_RET(*dev->dev_ops->stats_get);
1227         (*dev->dev_ops->stats_get)(dev, stats);
1228         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1229 }
1230
1231 void
1232 rte_eth_stats_reset(uint8_t port_id)
1233 {
1234         struct rte_eth_dev *dev;
1235
1236         if (port_id >= nb_ports) {
1237                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1238                 return;
1239         }
1240         dev = &rte_eth_devices[port_id];
1241
1242         FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
1243         (*dev->dev_ops->stats_reset)(dev);
1244 }
1245
/* retrieve ethdev extended statistics */
/*
 * Fill xstats[] with up to n extended statistics for a port.
 * If the PMD implements xstats_get, the call is delegated entirely.
 * Otherwise a generic set is synthesized from rte_eth_stats: global
 * counters plus per-RX-queue and per-TX-queue counters, located by byte
 * offset into struct rte_eth_stats via the rte_*_stats_strings tables.
 * Returns the number of entries filled, a positive required size when
 * n is too small, or -1 for an invalid port id.
 */
int
rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstats *xstats,
	unsigned n)
{
	struct rte_eth_stats eth_stats;
	struct rte_eth_dev *dev;
	unsigned count, i, q;
	uint64_t val;
	char *stats_ptr;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return -1;
	}
	dev = &rte_eth_devices[port_id];

	/* implemented by the driver */
	if (dev->dev_ops->xstats_get != NULL)
		return (*dev->dev_ops->xstats_get)(dev, xstats, n);

	/* else, return generic statistics */
	count = RTE_NB_STATS;
	count += dev->data->nb_rx_queues * RTE_NB_RXQ_STATS;
	count += dev->data->nb_tx_queues * RTE_NB_TXQ_STATS;
	/* caller's array too small: report the required size instead */
	if (n < count)
		return count;

	/* now fill the xstats structure */

	count = 0;
	memset(&eth_stats, 0, sizeof(eth_stats));
	rte_eth_stats_get(port_id, &eth_stats);

	/* global stats */
	for (i = 0; i < RTE_NB_STATS; i++) {
		stats_ptr = (char *)&eth_stats + rte_stats_strings[i].offset;
		val = *(uint64_t *)stats_ptr;
		snprintf(xstats[count].name, sizeof(xstats[count].name),
			"%s", rte_stats_strings[i].name);
		xstats[count++].value = val;
	}

	/* per-rxq stats: offset selects the counter array, q indexes into it */
	for (q = 0; q < dev->data->nb_rx_queues; q++) {
		for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
			stats_ptr = (char *)&eth_stats;
			stats_ptr += rte_rxq_stats_strings[i].offset;
			stats_ptr += q * sizeof(uint64_t);
			val = *(uint64_t *)stats_ptr;
			snprintf(xstats[count].name, sizeof(xstats[count].name),
				"rx_queue_%u_%s", q,
				rte_rxq_stats_strings[i].name);
			xstats[count++].value = val;
		}
	}

	/* per-txq stats: same offset-plus-queue-index addressing */
	for (q = 0; q < dev->data->nb_tx_queues; q++) {
		for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
			stats_ptr = (char *)&eth_stats;
			stats_ptr += rte_txq_stats_strings[i].offset;
			stats_ptr += q * sizeof(uint64_t);
			val = *(uint64_t *)stats_ptr;
			snprintf(xstats[count].name, sizeof(xstats[count].name),
				"tx_queue_%u_%s", q,
				rte_txq_stats_strings[i].name);
			xstats[count++].value = val;
		}
	}

	return count;
}
1319
1320 /* reset ethdev extended statistics */
1321 void
1322 rte_eth_xstats_reset(uint8_t port_id)
1323 {
1324         struct rte_eth_dev *dev;
1325
1326         if (port_id >= nb_ports) {
1327                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1328                 return;
1329         }
1330         dev = &rte_eth_devices[port_id];
1331
1332         /* implemented by the driver */
1333         if (dev->dev_ops->xstats_reset != NULL) {
1334                 (*dev->dev_ops->xstats_reset)(dev);
1335                 return;
1336         }
1337
1338         /* fallback to default */
1339         rte_eth_stats_reset(port_id);
1340 }
1341
1342 static int
1343 set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
1344                 uint8_t is_rx)
1345 {
1346         struct rte_eth_dev *dev;
1347
1348         if (port_id >= nb_ports) {
1349                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1350                 return -ENODEV;
1351         }
1352         dev = &rte_eth_devices[port_id];
1353
1354         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
1355         return (*dev->dev_ops->queue_stats_mapping_set)
1356                         (dev, queue_id, stat_idx, is_rx);
1357 }
1358
1359
1360 int
1361 rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
1362                 uint8_t stat_idx)
1363 {
1364         return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
1365                         STAT_QMAP_TX);
1366 }
1367
1368
1369 int
1370 rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
1371                 uint8_t stat_idx)
1372 {
1373         return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
1374                         STAT_QMAP_RX);
1375 }
1376
1377
/*
 * Retrieve the contextual information of a port into *dev_info.
 * The whole structure is zeroed before the PMD fills it, so fields the
 * driver does not set are returned as zero instead of stack garbage;
 * pci_dev and driver_name are then filled by this layer.
 */
void
rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
{
	struct rte_eth_dev *dev;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return;
	}
	dev = &rte_eth_devices[port_id];

	/* reset the whole structure before the PMD fills it */
	memset(dev_info, 0, sizeof(struct rte_eth_dev_info));

	FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);
	dev_info->pci_dev = dev->pci_dev;
	if (dev->driver)
		dev_info->driver_name = dev->driver->pci_drv.name;
}
1397
1398 void
1399 rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
1400 {
1401         struct rte_eth_dev *dev;
1402
1403         if (port_id >= nb_ports) {
1404                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1405                 return;
1406         }
1407         dev = &rte_eth_devices[port_id];
1408         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
1409 }
1410
1411
1412 int
1413 rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu)
1414 {
1415         struct rte_eth_dev *dev;
1416
1417         if (port_id >= nb_ports) {
1418                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1419                 return (-ENODEV);
1420         }
1421
1422         dev = &rte_eth_devices[port_id];
1423         *mtu = dev->data->mtu;
1424         return 0;
1425 }
1426
1427 int
1428 rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu)
1429 {
1430         int ret;
1431         struct rte_eth_dev *dev;
1432
1433         if (port_id >= nb_ports) {
1434                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1435                 return (-ENODEV);
1436         }
1437
1438         dev = &rte_eth_devices[port_id];
1439         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
1440
1441         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
1442         if (!ret)
1443                 dev->data->mtu = mtu;
1444
1445         return ret;
1446 }
1447
1448 int
1449 rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
1450 {
1451         struct rte_eth_dev *dev;
1452
1453         if (port_id >= nb_ports) {
1454                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1455                 return (-ENODEV);
1456         }
1457         dev = &rte_eth_devices[port_id];
1458         if (! (dev->data->dev_conf.rxmode.hw_vlan_filter)) {
1459                 PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
1460                 return (-ENOSYS);
1461         }
1462
1463         if (vlan_id > 4095) {
1464                 PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
1465                                 port_id, (unsigned) vlan_id);
1466                 return (-EINVAL);
1467         }
1468         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
1469         (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
1470         return (0);
1471 }
1472
1473 int
1474 rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on)
1475 {
1476         struct rte_eth_dev *dev;
1477
1478         if (port_id >= nb_ports) {
1479                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1480                 return (-ENODEV);
1481         }
1482
1483         dev = &rte_eth_devices[port_id];
1484         if (rx_queue_id >= dev->data->nb_rx_queues) {
1485                 PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", port_id);
1486                 return (-EINVAL);
1487         }
1488
1489         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
1490         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
1491
1492         return (0);
1493 }
1494
1495 int
1496 rte_eth_dev_set_vlan_ether_type(uint8_t port_id, uint16_t tpid)
1497 {
1498         struct rte_eth_dev *dev;
1499
1500         if (port_id >= nb_ports) {
1501                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1502                 return (-ENODEV);
1503         }
1504
1505         dev = &rte_eth_devices[port_id];
1506         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
1507         (*dev->dev_ops->vlan_tpid_set)(dev, tpid);
1508
1509         return (0);
1510 }
1511
/*
 * Apply a set of VLAN offload flags (strip/filter/extend) to a port.
 * Compares each requested flag against the current dev_conf value,
 * updates dev_conf in place, and calls the PMD vlan_offload_set callback
 * once with a mask of only the flags that actually changed.  If nothing
 * changed the PMD is not called at all.
 * Returns 0 on success, -ENODEV for a bad port id, -ENOTSUP when a
 * change is needed but the PMD lacks the callback.
 */
int
rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
{
	struct rte_eth_dev *dev;
	int ret = 0;
	int mask = 0;
	int cur, org = 0;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-ENODEV);
	}

	dev = &rte_eth_devices[port_id];

	/*check which option changed by application*/
	cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
	org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
	if (cur != org){
		dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
		mask |= ETH_VLAN_STRIP_MASK;
	}

	cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
	org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
	if (cur != org){
		dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
		mask |= ETH_VLAN_FILTER_MASK;
	}

	cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
	org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
	if (cur != org){
		dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
		mask |= ETH_VLAN_EXTEND_MASK;
	}

	/*no change*/
	if(mask == 0)
		return ret;

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
	(*dev->dev_ops->vlan_offload_set)(dev, mask);

	return ret;
}
1558
1559 int
1560 rte_eth_dev_get_vlan_offload(uint8_t port_id)
1561 {
1562         struct rte_eth_dev *dev;
1563         int ret = 0;
1564
1565         if (port_id >= nb_ports) {
1566                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1567                 return (-ENODEV);
1568         }
1569
1570         dev = &rte_eth_devices[port_id];
1571
1572         if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1573                 ret |= ETH_VLAN_STRIP_OFFLOAD ;
1574
1575         if (dev->data->dev_conf.rxmode.hw_vlan_filter)
1576                 ret |= ETH_VLAN_FILTER_OFFLOAD ;
1577
1578         if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1579                 ret |= ETH_VLAN_EXTEND_OFFLOAD ;
1580
1581         return ret;
1582 }
1583
1584 int
1585 rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on)
1586 {
1587         struct rte_eth_dev *dev;
1588
1589         if (port_id >= nb_ports) {
1590                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1591                 return (-ENODEV);
1592         }
1593         dev = &rte_eth_devices[port_id];
1594         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
1595         (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on);
1596
1597         return 0;
1598 }
1599
/*
 * Add a signature-mode flow director filter steering matching packets to
 * the given RX queue.  The port must be configured in signature FDIR
 * mode, and SCTP/None l4types must not carry L4 port values (the
 * hardware ignores them in those modes).
 * Returns 0 on success, -ENODEV/-ENOSYS/-EINVAL/-ENOTSUP on error, or
 * the PMD callback's result.
 */
int
rte_eth_dev_fdir_add_signature_filter(uint8_t port_id,
				      struct rte_fdir_filter *fdir_filter,
				      uint8_t queue)
{
	struct rte_eth_dev *dev;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-ENODEV);
	}

	dev = &rte_eth_devices[port_id];

	/* the port must have been configured for signature FDIR */
	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
		PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
				port_id, dev->data->dev_conf.fdir_conf.mode);
		return (-ENOSYS);
	}

	if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
	     || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
	    && (fdir_filter->port_src || fdir_filter->port_dst)) {
		PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
				"None l4type, source & destinations ports " \
				"should be null!\n");
		return (-EINVAL);
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_signature_filter, -ENOTSUP);
	return (*dev->dev_ops->fdir_add_signature_filter)(dev, fdir_filter,
								queue);
}
1633
/*
 * Update an existing signature-mode flow director filter, re-steering
 * matching packets to the given RX queue.  Same preconditions as
 * rte_eth_dev_fdir_add_signature_filter: signature FDIR mode must be
 * configured, and SCTP/None l4types must not carry L4 port values.
 * Returns 0 on success, -ENODEV/-ENOSYS/-EINVAL/-ENOTSUP on error, or
 * the PMD callback's result.
 */
int
rte_eth_dev_fdir_update_signature_filter(uint8_t port_id,
					 struct rte_fdir_filter *fdir_filter,
					 uint8_t queue)
{
	struct rte_eth_dev *dev;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-ENODEV);
	}

	dev = &rte_eth_devices[port_id];

	/* the port must have been configured for signature FDIR */
	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
		PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
				port_id, dev->data->dev_conf.fdir_conf.mode);
		return (-ENOSYS);
	}

	if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
	     || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
	    && (fdir_filter->port_src || fdir_filter->port_dst)) {
		PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
				"None l4type, source & destinations ports " \
				"should be null!\n");
		return (-EINVAL);
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_signature_filter, -ENOTSUP);
	return (*dev->dev_ops->fdir_update_signature_filter)(dev, fdir_filter,
								queue);

}
1668
1669 int
1670 rte_eth_dev_fdir_remove_signature_filter(uint8_t port_id,
1671                                          struct rte_fdir_filter *fdir_filter)
1672 {
1673         struct rte_eth_dev *dev;
1674
1675         if (port_id >= nb_ports) {
1676                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1677                 return (-ENODEV);
1678         }
1679
1680         dev = &rte_eth_devices[port_id];
1681
1682         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
1683                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1684                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1685                 return (-ENOSYS);
1686         }
1687
1688         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1689              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1690             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1691                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1692                                 "None l4type source & destinations ports " \
1693                                 "should be null!\n");
1694                 return (-EINVAL);
1695         }
1696
1697         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_signature_filter, -ENOTSUP);
1698         return (*dev->dev_ops->fdir_remove_signature_filter)(dev, fdir_filter);
1699 }
1700
1701 int
1702 rte_eth_dev_fdir_get_infos(uint8_t port_id, struct rte_eth_fdir *fdir)
1703 {
1704         struct rte_eth_dev *dev;
1705
1706         if (port_id >= nb_ports) {
1707                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1708                 return (-ENODEV);
1709         }
1710
1711         dev = &rte_eth_devices[port_id];
1712         if (! (dev->data->dev_conf.fdir_conf.mode)) {
1713                 PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
1714                 return (-ENOSYS);
1715         }
1716
1717         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_infos_get, -ENOTSUP);
1718
1719         (*dev->dev_ops->fdir_infos_get)(dev, fdir);
1720         return (0);
1721 }
1722
1723 int
1724 rte_eth_dev_fdir_add_perfect_filter(uint8_t port_id,
1725                                     struct rte_fdir_filter *fdir_filter,
1726                                     uint16_t soft_id, uint8_t queue,
1727                                     uint8_t drop)
1728 {
1729         struct rte_eth_dev *dev;
1730
1731         if (port_id >= nb_ports) {
1732                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1733                 return (-ENODEV);
1734         }
1735
1736         dev = &rte_eth_devices[port_id];
1737
1738         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
1739                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1740                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1741                 return (-ENOSYS);
1742         }
1743
1744         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1745              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1746             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1747                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1748                                 "None l4type, source & destinations ports " \
1749                                 "should be null!\n");
1750                 return (-EINVAL);
1751         }
1752
1753         /* For now IPv6 is not supported with perfect filter */
1754         if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
1755                 return (-ENOTSUP);
1756
1757         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_perfect_filter, -ENOTSUP);
1758         return (*dev->dev_ops->fdir_add_perfect_filter)(dev, fdir_filter,
1759                                                                 soft_id, queue,
1760                                                                 drop);
1761 }
1762
1763 int
1764 rte_eth_dev_fdir_update_perfect_filter(uint8_t port_id,
1765                                        struct rte_fdir_filter *fdir_filter,
1766                                        uint16_t soft_id, uint8_t queue,
1767                                        uint8_t drop)
1768 {
1769         struct rte_eth_dev *dev;
1770
1771         if (port_id >= nb_ports) {
1772                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1773                 return (-ENODEV);
1774         }
1775
1776         dev = &rte_eth_devices[port_id];
1777
1778         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
1779                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1780                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1781                 return (-ENOSYS);
1782         }
1783
1784         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1785              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1786             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1787                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1788                                 "None l4type, source & destinations ports " \
1789                                 "should be null!\n");
1790                 return (-EINVAL);
1791         }
1792
1793         /* For now IPv6 is not supported with perfect filter */
1794         if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
1795                 return (-ENOTSUP);
1796
1797         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_perfect_filter, -ENOTSUP);
1798         return (*dev->dev_ops->fdir_update_perfect_filter)(dev, fdir_filter,
1799                                                         soft_id, queue, drop);
1800 }
1801
1802 int
1803 rte_eth_dev_fdir_remove_perfect_filter(uint8_t port_id,
1804                                        struct rte_fdir_filter *fdir_filter,
1805                                        uint16_t soft_id)
1806 {
1807         struct rte_eth_dev *dev;
1808
1809         if (port_id >= nb_ports) {
1810                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1811                 return (-ENODEV);
1812         }
1813
1814         dev = &rte_eth_devices[port_id];
1815
1816         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
1817                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1818                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1819                 return (-ENOSYS);
1820         }
1821
1822         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1823              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1824             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1825                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1826                                 "None l4type, source & destinations ports " \
1827                                 "should be null!\n");
1828                 return (-EINVAL);
1829         }
1830
1831         /* For now IPv6 is not supported with perfect filter */
1832         if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
1833                 return (-ENOTSUP);
1834
1835         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_perfect_filter, -ENOTSUP);
1836         return (*dev->dev_ops->fdir_remove_perfect_filter)(dev, fdir_filter,
1837                                                                 soft_id);
1838 }
1839
1840 int
1841 rte_eth_dev_fdir_set_masks(uint8_t port_id, struct rte_fdir_masks *fdir_mask)
1842 {
1843         struct rte_eth_dev *dev;
1844
1845         if (port_id >= nb_ports) {
1846                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1847                 return (-ENODEV);
1848         }
1849
1850         dev = &rte_eth_devices[port_id];
1851         if (! (dev->data->dev_conf.fdir_conf.mode)) {
1852                 PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
1853                 return (-ENOSYS);
1854         }
1855
1856         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_set_masks, -ENOTSUP);
1857         return (*dev->dev_ops->fdir_set_masks)(dev, fdir_mask);
1858 }
1859
1860 int
1861 rte_eth_dev_flow_ctrl_get(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
1862 {
1863         struct rte_eth_dev *dev;
1864
1865         if (port_id >= nb_ports) {
1866                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1867                 return (-ENODEV);
1868         }
1869
1870         dev = &rte_eth_devices[port_id];
1871         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
1872         memset(fc_conf, 0, sizeof(*fc_conf));
1873         return (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf);
1874 }
1875
1876 int
1877 rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
1878 {
1879         struct rte_eth_dev *dev;
1880
1881         if (port_id >= nb_ports) {
1882                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1883                 return (-ENODEV);
1884         }
1885
1886         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
1887                 PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
1888                 return (-EINVAL);
1889         }
1890
1891         dev = &rte_eth_devices[port_id];
1892         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
1893         return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
1894 }
1895
1896 int
1897 rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc_conf)
1898 {
1899         struct rte_eth_dev *dev;
1900
1901         if (port_id >= nb_ports) {
1902                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1903                 return (-ENODEV);
1904         }
1905
1906         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
1907                 PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
1908                 return (-EINVAL);
1909         }
1910
1911         dev = &rte_eth_devices[port_id];
1912         /* High water, low water validation are device specific */
1913         if  (*dev->dev_ops->priority_flow_ctrl_set)
1914                 return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
1915         return (-ENOTSUP);
1916 }
1917
/*
 * Update the RSS redirection table (RETA) of a port.
 *
 * reta_conf->mask_lo/mask_hi select which of the ETH_RSS_RETA_NUM_ENTRIES
 * entries to update (one bit per entry, low/high halves).  Every selected
 * entry must point at an RX queue below the port's configured queue count.
 * Returns 0 on success, negative errno-style value on failure.
 */
int
rte_eth_dev_rss_reta_update(uint8_t port_id, struct rte_eth_rss_reta *reta_conf)
{
	struct rte_eth_dev *dev;
	uint16_t max_rxq;
	uint8_t i,j;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-ENODEV);
	}

	/* Invalid mask bit(s) setting: at least one entry must be selected */
	if ((reta_conf->mask_lo == 0) && (reta_conf->mask_hi == 0)) {
		PMD_DEBUG_TRACE("Invalid update mask bits for port=%d\n",port_id);
		return (-EINVAL);
	}

	dev = &rte_eth_devices[port_id];
	/* Upper bound for valid queue ids: configured queues, capped at
	 * the RETA hardware limit. */
	max_rxq = (dev->data->nb_rx_queues <= ETH_RSS_RETA_MAX_QUEUE) ?
		dev->data->nb_rx_queues : ETH_RSS_RETA_MAX_QUEUE;
	/* Validate the low half of the table (entries 0..NUM/2-1). */
	if (reta_conf->mask_lo != 0) {
		for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES/2; i++) {
			if ((reta_conf->mask_lo & (1ULL << i)) &&
				(reta_conf->reta[i] >= max_rxq)) {
				PMD_DEBUG_TRACE("RETA hash index output"
					"configration for port=%d,invalid"
					"queue=%d\n",port_id,reta_conf->reta[i]);

				return (-EINVAL);
			}
		}
	}

	/* Validate the high half (entries NUM/2..NUM-1); bit i of mask_hi
	 * corresponds to entry i + NUM/2. */
	if (reta_conf->mask_hi != 0) {
		for (i = 0; i< ETH_RSS_RETA_NUM_ENTRIES/2; i++) {
			j = (uint8_t)(i + ETH_RSS_RETA_NUM_ENTRIES/2);

			/* Check if the max entry >= 128 */
			if ((reta_conf->mask_hi & (1ULL << i)) &&
				(reta_conf->reta[j] >= max_rxq)) {
				PMD_DEBUG_TRACE("RETA hash index output"
					"configration for port=%d,invalid"
					"queue=%d\n",port_id,reta_conf->reta[j]);

				return (-EINVAL);
			}
		}
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
	return (*dev->dev_ops->reta_update)(dev, reta_conf);
}
1971
1972 int
1973 rte_eth_dev_rss_reta_query(uint8_t port_id, struct rte_eth_rss_reta *reta_conf)
1974 {
1975         struct rte_eth_dev *dev;
1976
1977         if (port_id >= nb_ports) {
1978                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1979                 return (-ENODEV);
1980         }
1981
1982         if((reta_conf->mask_lo == 0) && (reta_conf->mask_hi == 0)) {
1983                 PMD_DEBUG_TRACE("Invalid update mask bits for the port=%d\n",port_id);
1984                 return (-EINVAL);
1985         }
1986
1987         dev = &rte_eth_devices[port_id];
1988         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
1989         return (*dev->dev_ops->reta_query)(dev, reta_conf);
1990 }
1991
1992 int
1993 rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf)
1994 {
1995         struct rte_eth_dev *dev;
1996         uint16_t rss_hash_protos;
1997
1998         if (port_id >= nb_ports) {
1999                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2000                 return (-ENODEV);
2001         }
2002         rss_hash_protos = rss_conf->rss_hf;
2003         if ((rss_hash_protos != 0) &&
2004             ((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) {
2005                 PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%x\n",
2006                                 rss_hash_protos);
2007                 return (-EINVAL);
2008         }
2009         dev = &rte_eth_devices[port_id];
2010         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
2011         return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
2012 }
2013
2014 int
2015 rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
2016                               struct rte_eth_rss_conf *rss_conf)
2017 {
2018         struct rte_eth_dev *dev;
2019
2020         if (port_id >= nb_ports) {
2021                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2022                 return (-ENODEV);
2023         }
2024         dev = &rte_eth_devices[port_id];
2025         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
2026         return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
2027 }
2028
2029 int
2030 rte_eth_led_on(uint8_t port_id)
2031 {
2032         struct rte_eth_dev *dev;
2033
2034         if (port_id >= nb_ports) {
2035                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2036                 return (-ENODEV);
2037         }
2038
2039         dev = &rte_eth_devices[port_id];
2040         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
2041         return ((*dev->dev_ops->dev_led_on)(dev));
2042 }
2043
2044 int
2045 rte_eth_led_off(uint8_t port_id)
2046 {
2047         struct rte_eth_dev *dev;
2048
2049         if (port_id >= nb_ports) {
2050                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2051                 return (-ENODEV);
2052         }
2053
2054         dev = &rte_eth_devices[port_id];
2055         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
2056         return ((*dev->dev_ops->dev_led_off)(dev));
2057 }
2058
2059 /*
2060  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2061  * an empty spot.
2062  */
2063 static inline int
2064 get_mac_addr_index(uint8_t port_id, struct ether_addr *addr)
2065 {
2066         struct rte_eth_dev_info dev_info;
2067         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2068         unsigned i;
2069
2070         rte_eth_dev_info_get(port_id, &dev_info);
2071
2072         for (i = 0; i < dev_info.max_mac_addrs; i++)
2073                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
2074                         return i;
2075
2076         return -1;
2077 }
2078
/* All-zero MAC address: marks a free slot in the per-port MAC tables. */
static struct ether_addr null_mac_addr = {{0, 0, 0, 0, 0, 0}};
2080
/*
 * Add a MAC address to a port and associate it with a VMDq pool.
 *
 * If the address is already present only the pool bitmap is extended;
 * otherwise the first free (all-zero) slot in the MAC table is used.
 * The NIC is programmed first, then the software shadow state
 * (mac_addrs[] and mac_pool_sel[]) is updated to match.
 *
 * Returns 0 on success, negative errno-style value on failure.
 */
int
rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
			uint32_t pool)
{
	struct rte_eth_dev *dev;
	int index;
	uint64_t pool_mask;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-ENODEV);
	}
	dev = &rte_eth_devices[port_id];
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);

	/* The all-zero address is reserved as the free-slot marker. */
	if (is_zero_ether_addr(addr)) {
		PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
			port_id);
		return (-EINVAL);
	}
	if (pool >= ETH_64_POOLS) {
		PMD_DEBUG_TRACE("pool id must be 0-%d\n",ETH_64_POOLS - 1);
		return (-EINVAL);
	}

	/* Reuse the slot if the address is known, else grab a free one. */
	index = get_mac_addr_index(port_id, addr);
	if (index < 0) {
		index = get_mac_addr_index(port_id, &null_mac_addr);
		if (index < 0) {
			PMD_DEBUG_TRACE("port %d: MAC address array full\n",
				port_id);
			return (-ENOSPC);
		}
	} else {
		pool_mask = dev->data->mac_pool_sel[index];

		/* Check if both MAC address and pool are already there; do nothing */
		if (pool_mask & (1ULL << pool))
			return 0;
	}

	/* Update NIC */
	(*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);

	/* Update address in NIC data structure */
	ether_addr_copy(addr, &dev->data->mac_addrs[index]);

	/* Update pool bitmap in NIC data structure */
	dev->data->mac_pool_sel[index] |= (1ULL << pool);

	return 0;
}
2133
2134 int
2135 rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
2136 {
2137         struct rte_eth_dev *dev;
2138         int index;
2139
2140         if (port_id >= nb_ports) {
2141                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2142                 return (-ENODEV);
2143         }
2144         dev = &rte_eth_devices[port_id];
2145         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
2146
2147         index = get_mac_addr_index(port_id, addr);
2148         if (index == 0) {
2149                 PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
2150                 return (-EADDRINUSE);
2151         } else if (index < 0)
2152                 return 0;  /* Do nothing if address wasn't found */
2153
2154         /* Update NIC */
2155         (*dev->dev_ops->mac_addr_remove)(dev, index);
2156
2157         /* Update address in NIC data structure */
2158         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
2159
2160         return 0;
2161 }
2162
2163 int
2164 rte_eth_dev_set_vf_rxmode(uint8_t port_id,  uint16_t vf,
2165                                 uint16_t rx_mode, uint8_t on)
2166 {
2167         uint16_t num_vfs;
2168         struct rte_eth_dev *dev;
2169         struct rte_eth_dev_info dev_info;
2170
2171         if (port_id >= nb_ports) {
2172                 PMD_DEBUG_TRACE("set VF RX mode:Invalid port_id=%d\n",
2173                                 port_id);
2174                 return (-ENODEV);
2175         }
2176
2177         dev = &rte_eth_devices[port_id];
2178         rte_eth_dev_info_get(port_id, &dev_info);
2179
2180         num_vfs = dev_info.max_vfs;
2181         if (vf > num_vfs)
2182         {
2183                 PMD_DEBUG_TRACE("set VF RX mode:invalid VF id %d\n", vf);
2184                 return (-EINVAL);
2185         }
2186         if (rx_mode == 0)
2187         {
2188                 PMD_DEBUG_TRACE("set VF RX mode:mode mask ca not be zero\n");
2189                 return (-EINVAL);
2190         }
2191         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP);
2192         return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on);
2193 }
2194
2195 /*
2196  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2197  * an empty spot.
2198  */
2199 static inline int
2200 get_hash_mac_addr_index(uint8_t port_id, struct ether_addr *addr)
2201 {
2202         struct rte_eth_dev_info dev_info;
2203         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2204         unsigned i;
2205
2206         rte_eth_dev_info_get(port_id, &dev_info);
2207         if (!dev->data->hash_mac_addrs)
2208                 return -1;
2209
2210         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
2211                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
2212                         ETHER_ADDR_LEN) == 0)
2213                         return i;
2214
2215         return -1;
2216 }
2217
/*
 * Add (on != 0) or remove (on == 0) a MAC address in the unicast hash
 * table (UTA) of a port.
 *
 * The NIC is updated first; the software shadow table
 * (data->hash_mac_addrs[]) is only touched when the driver call
 * succeeds, so the two stay consistent.
 *
 * Returns 0 on success, negative errno-style value on failure.
 */
int
rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
				uint8_t on)
{
	int index;
	int ret;
	struct rte_eth_dev *dev;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("unicast hash setting:Invalid port_id=%d\n",
			port_id);
		return (-ENODEV);
	}

	dev = &rte_eth_devices[port_id];
	/* The all-zero address is reserved as the free-slot marker. */
	if (is_zero_ether_addr(addr)) {
		PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
			port_id);
		return (-EINVAL);
	}

	index = get_hash_mac_addr_index(port_id, addr);
	/* Check if it's already there, and do nothing */
	if ((index >= 0) && (on))
		return 0;

	if (index < 0) {
		/* Cannot remove an address that was never set. */
		if (!on) {
			PMD_DEBUG_TRACE("port %d: the MAC address was not"
				"set in UTA\n", port_id);
			return (-EINVAL);
		}

		/* Find a free (all-zero) slot for the new address. */
		index = get_hash_mac_addr_index(port_id, &null_mac_addr);
		if (index < 0) {
			PMD_DEBUG_TRACE("port %d: MAC address array full\n",
					port_id);
			return (-ENOSPC);
		}
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
	ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
	if (ret == 0) {
		/* Update address in NIC data structure */
		if (on)
			ether_addr_copy(addr,
					&dev->data->hash_mac_addrs[index]);
		else
			ether_addr_copy(&null_mac_addr,
					&dev->data->hash_mac_addrs[index]);
	}

	return ret;
}
2273
2274 int
2275 rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
2276 {
2277         struct rte_eth_dev *dev;
2278
2279         if (port_id >= nb_ports) {
2280                 PMD_DEBUG_TRACE("unicast hash setting:Invalid port_id=%d\n",
2281                         port_id);
2282                 return (-ENODEV);
2283         }
2284
2285         dev = &rte_eth_devices[port_id];
2286
2287         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
2288         return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
2289 }
2290
2291 int
2292 rte_eth_dev_set_vf_rx(uint8_t port_id,uint16_t vf, uint8_t on)
2293 {
2294         uint16_t num_vfs;
2295         struct rte_eth_dev *dev;
2296         struct rte_eth_dev_info dev_info;
2297
2298         if (port_id >= nb_ports) {
2299                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2300                 return (-ENODEV);
2301         }
2302
2303         dev = &rte_eth_devices[port_id];
2304         rte_eth_dev_info_get(port_id, &dev_info);
2305
2306         num_vfs = dev_info.max_vfs;
2307         if (vf > num_vfs)
2308         {
2309                 PMD_DEBUG_TRACE("port %d: invalid vf id\n", port_id);
2310                 return (-EINVAL);
2311         }
2312
2313         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP);
2314         return (*dev->dev_ops->set_vf_rx)(dev, vf,on);
2315 }
2316
2317 int
2318 rte_eth_dev_set_vf_tx(uint8_t port_id,uint16_t vf, uint8_t on)
2319 {
2320         uint16_t num_vfs;
2321         struct rte_eth_dev *dev;
2322         struct rte_eth_dev_info dev_info;
2323
2324         if (port_id >= nb_ports) {
2325                 PMD_DEBUG_TRACE("set pool tx:Invalid port_id=%d\n", port_id);
2326                 return (-ENODEV);
2327         }
2328
2329         dev = &rte_eth_devices[port_id];
2330         rte_eth_dev_info_get(port_id, &dev_info);
2331
2332         num_vfs = dev_info.max_vfs;
2333         if (vf > num_vfs)
2334         {
2335                 PMD_DEBUG_TRACE("set pool tx:invalid pool id=%d\n", vf);
2336                 return (-EINVAL);
2337         }
2338
2339         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP);
2340         return (*dev->dev_ops->set_vf_tx)(dev, vf,on);
2341 }
2342
2343 int
2344 rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id,
2345                                  uint64_t vf_mask,uint8_t vlan_on)
2346 {
2347         struct rte_eth_dev *dev;
2348
2349         if (port_id >= nb_ports) {
2350                 PMD_DEBUG_TRACE("VF VLAN filter:invalid port id=%d\n",
2351                                 port_id);
2352                 return (-ENODEV);
2353         }
2354         dev = &rte_eth_devices[port_id];
2355
2356         if(vlan_id > ETHER_MAX_VLAN_ID)
2357         {
2358                 PMD_DEBUG_TRACE("VF VLAN filter:invalid VLAN id=%d\n",
2359                         vlan_id);
2360                 return (-EINVAL);
2361         }
2362         if (vf_mask == 0)
2363         {
2364                 PMD_DEBUG_TRACE("VF VLAN filter:pool_mask can not be 0\n");
2365                 return (-EINVAL);
2366         }
2367
2368         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP);
2369         return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id,
2370                                                 vf_mask,vlan_on);
2371 }
2372
2373 int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
2374                                         uint16_t tx_rate)
2375 {
2376         struct rte_eth_dev *dev;
2377         struct rte_eth_dev_info dev_info;
2378         struct rte_eth_link link;
2379
2380         if (port_id >= nb_ports) {
2381                 PMD_DEBUG_TRACE("set queue rate limit:invalid port id=%d\n",
2382                                 port_id);
2383                 return -ENODEV;
2384         }
2385
2386         dev = &rte_eth_devices[port_id];
2387         rte_eth_dev_info_get(port_id, &dev_info);
2388         link = dev->data->dev_link;
2389
2390         if (queue_idx > dev_info.max_tx_queues) {
2391                 PMD_DEBUG_TRACE("set queue rate limit:port %d: "
2392                                 "invalid queue id=%d\n", port_id, queue_idx);
2393                 return -EINVAL;
2394         }
2395
2396         if (tx_rate > link.link_speed) {
2397                 PMD_DEBUG_TRACE("set queue rate limit:invalid tx_rate=%d, "
2398                                 "bigger than link speed= %d\n",
2399                         tx_rate, link.link_speed);
2400                 return -EINVAL;
2401         }
2402
2403         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
2404         return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate);
2405 }
2406
2407 int rte_eth_set_vf_rate_limit(uint8_t port_id, uint16_t vf, uint16_t tx_rate,
2408                                 uint64_t q_msk)
2409 {
2410         struct rte_eth_dev *dev;
2411         struct rte_eth_dev_info dev_info;
2412         struct rte_eth_link link;
2413
2414         if (q_msk == 0)
2415                 return 0;
2416
2417         if (port_id >= nb_ports) {
2418                 PMD_DEBUG_TRACE("set VF rate limit:invalid port id=%d\n",
2419                                 port_id);
2420                 return -ENODEV;
2421         }
2422
2423         dev = &rte_eth_devices[port_id];
2424         rte_eth_dev_info_get(port_id, &dev_info);
2425         link = dev->data->dev_link;
2426
2427         if (vf > dev_info.max_vfs) {
2428                 PMD_DEBUG_TRACE("set VF rate limit:port %d: "
2429                                 "invalid vf id=%d\n", port_id, vf);
2430                 return -EINVAL;
2431         }
2432
2433         if (tx_rate > link.link_speed) {
2434                 PMD_DEBUG_TRACE("set VF rate limit:invalid tx_rate=%d, "
2435                                 "bigger than link speed= %d\n",
2436                                 tx_rate, link.link_speed);
2437                 return -EINVAL;
2438         }
2439
2440         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rate_limit, -ENOTSUP);
2441         return (*dev->dev_ops->set_vf_rate_limit)(dev, vf, tx_rate, q_msk);
2442 }
2443
2444 int
2445 rte_eth_mirror_rule_set(uint8_t port_id,
2446                         struct rte_eth_vmdq_mirror_conf *mirror_conf,
2447                         uint8_t rule_id, uint8_t on)
2448 {
2449         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2450
2451         if (port_id >= nb_ports) {
2452                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2453                 return (-ENODEV);
2454         }
2455
2456         if (mirror_conf->rule_type_mask == 0) {
2457                 PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
2458                 return (-EINVAL);
2459         }
2460
2461         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
2462                 PMD_DEBUG_TRACE("Invalid dst pool, pool id must"
2463                         "be 0-%d\n",ETH_64_POOLS - 1);
2464                 return (-EINVAL);
2465         }
2466
2467         if ((mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) &&
2468                 (mirror_conf->pool_mask == 0)) {
2469                 PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not"
2470                                 "be 0.\n");
2471                 return (-EINVAL);
2472         }
2473
2474         if(rule_id >= ETH_VMDQ_NUM_MIRROR_RULE)
2475         {
2476                 PMD_DEBUG_TRACE("Invalid rule_id, rule_id must be 0-%d\n",
2477                         ETH_VMDQ_NUM_MIRROR_RULE - 1);
2478                 return (-EINVAL);
2479         }
2480
2481         dev = &rte_eth_devices[port_id];
2482         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
2483
2484         return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
2485 }
2486
2487 int
2488 rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
2489 {
2490         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2491
2492         if (port_id >= nb_ports) {
2493                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2494                 return (-ENODEV);
2495         }
2496
2497         if(rule_id >= ETH_VMDQ_NUM_MIRROR_RULE)
2498         {
2499                 PMD_DEBUG_TRACE("Invalid rule_id, rule_id must be 0-%d\n",
2500                         ETH_VMDQ_NUM_MIRROR_RULE-1);
2501                 return (-EINVAL);
2502         }
2503
2504         dev = &rte_eth_devices[port_id];
2505         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
2506
2507         return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
2508 }
2509
2510 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2511 uint16_t
2512 rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
2513                  struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
2514 {
2515         struct rte_eth_dev *dev;
2516
2517         if (port_id >= nb_ports) {
2518                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2519                 return 0;
2520         }
2521         dev = &rte_eth_devices[port_id];
2522         FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, -ENOTSUP);
2523         if (queue_id >= dev->data->nb_rx_queues) {
2524                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
2525                 return 0;
2526         }
2527         return (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
2528                                                 rx_pkts, nb_pkts);
2529 }
2530
2531 uint16_t
2532 rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
2533                  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2534 {
2535         struct rte_eth_dev *dev;
2536
2537         if (port_id >= nb_ports) {
2538                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2539                 return 0;
2540         }
2541         dev = &rte_eth_devices[port_id];
2542
2543         FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, -ENOTSUP);
2544         if (queue_id >= dev->data->nb_tx_queues) {
2545                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
2546                 return 0;
2547         }
2548         return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id],
2549                                                 tx_pkts, nb_pkts);
2550 }
2551
2552 uint32_t
2553 rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
2554 {
2555         struct rte_eth_dev *dev;
2556
2557         if (port_id >= nb_ports) {
2558                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2559                 return 0;
2560         }
2561         dev = &rte_eth_devices[port_id];
2562         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, -ENOTSUP);
2563         return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
2564 }
2565
2566 int
2567 rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
2568 {
2569         struct rte_eth_dev *dev;
2570
2571         if (port_id >= nb_ports) {
2572                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2573                 return (-ENODEV);
2574         }
2575         dev = &rte_eth_devices[port_id];
2576         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
2577         return (*dev->dev_ops->rx_descriptor_done)( \
2578                 dev->data->rx_queues[queue_id], offset);
2579 }
2580 #endif
2581
2582 int
2583 rte_eth_dev_callback_register(uint8_t port_id,
2584                         enum rte_eth_event_type event,
2585                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2586 {
2587         struct rte_eth_dev *dev;
2588         struct rte_eth_dev_callback *user_cb;
2589
2590         if (!cb_fn)
2591                 return (-EINVAL);
2592         if (port_id >= nb_ports) {
2593                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2594                 return (-EINVAL);
2595         }
2596
2597         dev = &rte_eth_devices[port_id];
2598         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2599
2600         TAILQ_FOREACH(user_cb, &(dev->callbacks), next) {
2601                 if (user_cb->cb_fn == cb_fn &&
2602                         user_cb->cb_arg == cb_arg &&
2603                         user_cb->event == event) {
2604                         break;
2605                 }
2606         }
2607
2608         /* create a new callback. */
2609         if (user_cb == NULL && (user_cb = rte_zmalloc("INTR_USER_CALLBACK",
2610                         sizeof(struct rte_eth_dev_callback), 0)) != NULL) {
2611                 user_cb->cb_fn = cb_fn;
2612                 user_cb->cb_arg = cb_arg;
2613                 user_cb->event = event;
2614                 TAILQ_INSERT_TAIL(&(dev->callbacks), user_cb, next);
2615         }
2616
2617         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2618         return ((user_cb == NULL) ? -ENOMEM : 0);
2619 }
2620
2621 int
2622 rte_eth_dev_callback_unregister(uint8_t port_id,
2623                         enum rte_eth_event_type event,
2624                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2625 {
2626         int ret;
2627         struct rte_eth_dev *dev;
2628         struct rte_eth_dev_callback *cb, *next;
2629
2630         if (!cb_fn)
2631                 return (-EINVAL);
2632         if (port_id >= nb_ports) {
2633                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2634                 return (-EINVAL);
2635         }
2636
2637         dev = &rte_eth_devices[port_id];
2638         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2639
2640         ret = 0;
2641         for (cb = TAILQ_FIRST(&dev->callbacks); cb != NULL; cb = next) {
2642
2643                 next = TAILQ_NEXT(cb, next);
2644
2645                 if (cb->cb_fn != cb_fn || cb->event != event ||
2646                                 (cb->cb_arg != (void *)-1 &&
2647                                 cb->cb_arg != cb_arg))
2648                         continue;
2649
2650                 /*
2651                  * if this callback is not executing right now,
2652                  * then remove it.
2653                  */
2654                 if (cb->active == 0) {
2655                         TAILQ_REMOVE(&(dev->callbacks), cb, next);
2656                         rte_free(cb);
2657                 } else {
2658                         ret = -EAGAIN;
2659                 }
2660         }
2661
2662         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2663         return (ret);
2664 }
2665
/*
 * Invoke all user callbacks registered on @dev for @event.
 *
 * The callback list lock is dropped while each user callback runs so
 * that the callback may itself call ethdev callback APIs; the `active`
 * flag tells rte_eth_dev_callback_unregister() not to free an entry
 * that is mid-execution (it returns -EAGAIN instead).
 */
void
_rte_eth_dev_callback_process(struct rte_eth_dev *dev,
	enum rte_eth_event_type event)
{
	struct rte_eth_dev_callback *cb_lst;
	struct rte_eth_dev_callback dev_cb;

	rte_spinlock_lock(&rte_eth_dev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->callbacks), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		/* Copy the entry before unlocking: the list node's fields
		 * must not be read while the lock is released. */
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		rte_spinlock_unlock(&rte_eth_dev_cb_lock);
		dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
						dev_cb.cb_arg);
		rte_spinlock_lock(&rte_eth_dev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
}
2687 #ifdef RTE_NIC_BYPASS
2688 int rte_eth_dev_bypass_init(uint8_t port_id)
2689 {
2690         struct rte_eth_dev *dev;
2691
2692         if (port_id >= nb_ports) {
2693                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2694                 return (-ENODEV);
2695         }
2696
2697         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2698                 PMD_DEBUG_TRACE("Invalid port device\n");
2699                 return (-ENODEV);
2700         }
2701
2702         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP);
2703         (*dev->dev_ops->bypass_init)(dev);
2704         return 0;
2705 }
2706
2707 int
2708 rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state)
2709 {
2710         struct rte_eth_dev *dev;
2711
2712         if (port_id >= nb_ports) {
2713                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2714                 return (-ENODEV);
2715         }
2716
2717         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2718                 PMD_DEBUG_TRACE("Invalid port device\n");
2719                 return (-ENODEV);
2720         }
2721         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
2722         (*dev->dev_ops->bypass_state_show)(dev, state);
2723         return 0;
2724 }
2725
2726 int
2727 rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state)
2728 {
2729         struct rte_eth_dev *dev;
2730
2731         if (port_id >= nb_ports) {
2732                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2733                 return (-ENODEV);
2734         }
2735
2736         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2737                 PMD_DEBUG_TRACE("Invalid port device\n");
2738                 return (-ENODEV);
2739         }
2740
2741         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP);
2742         (*dev->dev_ops->bypass_state_set)(dev, new_state);
2743         return 0;
2744 }
2745
2746 int
2747 rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state)
2748 {
2749         struct rte_eth_dev *dev;
2750
2751         if (port_id >= nb_ports) {
2752                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2753                 return (-ENODEV);
2754         }
2755
2756         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2757                 PMD_DEBUG_TRACE("Invalid port device\n");
2758                 return (-ENODEV);
2759         }
2760
2761         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
2762         (*dev->dev_ops->bypass_event_show)(dev, event, state);
2763         return 0;
2764 }
2765
2766 int
2767 rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state)
2768 {
2769         struct rte_eth_dev *dev;
2770
2771         if (port_id >= nb_ports) {
2772                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2773                 return (-ENODEV);
2774         }
2775
2776         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2777                 PMD_DEBUG_TRACE("Invalid port device\n");
2778                 return (-ENODEV);
2779         }
2780
2781         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP);
2782         (*dev->dev_ops->bypass_event_set)(dev, event, state);
2783         return 0;
2784 }
2785
2786 int
2787 rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout)
2788 {
2789         struct rte_eth_dev *dev;
2790
2791         if (port_id >= nb_ports) {
2792                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2793                 return (-ENODEV);
2794         }
2795
2796         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2797                 PMD_DEBUG_TRACE("Invalid port device\n");
2798                 return (-ENODEV);
2799         }
2800
2801         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP);
2802         (*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout);
2803         return 0;
2804 }
2805
2806 int
2807 rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver)
2808 {
2809         struct rte_eth_dev *dev;
2810
2811         if (port_id >= nb_ports) {
2812                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2813                 return (-ENODEV);
2814         }
2815
2816         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2817                 PMD_DEBUG_TRACE("Invalid port device\n");
2818                 return (-ENODEV);
2819         }
2820
2821         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP);
2822         (*dev->dev_ops->bypass_ver_show)(dev, ver);
2823         return 0;
2824 }
2825
2826 int
2827 rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
2828 {
2829         struct rte_eth_dev *dev;
2830
2831         if (port_id >= nb_ports) {
2832                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2833                 return (-ENODEV);
2834         }
2835
2836         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2837                 PMD_DEBUG_TRACE("Invalid port device\n");
2838                 return (-ENODEV);
2839         }
2840
2841         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP);
2842         (*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout);
2843         return 0;
2844 }
2845
2846 int
2847 rte_eth_dev_bypass_wd_reset(uint8_t port_id)
2848 {
2849         struct rte_eth_dev *dev;
2850
2851         if (port_id >= nb_ports) {
2852                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2853                 return (-ENODEV);
2854         }
2855
2856         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2857                 PMD_DEBUG_TRACE("Invalid port device\n");
2858                 return (-ENODEV);
2859         }
2860
2861         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP);
2862         (*dev->dev_ops->bypass_wd_reset)(dev);
2863         return 0;
2864 }
2865 #endif
2866
2867 int
2868 rte_eth_dev_add_syn_filter(uint8_t port_id,
2869                         struct rte_syn_filter *filter, uint16_t rx_queue)
2870 {
2871         struct rte_eth_dev *dev;
2872
2873         if (port_id >= nb_ports) {
2874                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2875                 return -ENODEV;
2876         }
2877
2878         dev = &rte_eth_devices[port_id];
2879         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->add_syn_filter, -ENOTSUP);
2880         return (*dev->dev_ops->add_syn_filter)(dev, filter, rx_queue);
2881 }
2882
2883 int
2884 rte_eth_dev_remove_syn_filter(uint8_t port_id)
2885 {
2886         struct rte_eth_dev *dev;
2887
2888         if (port_id >= nb_ports) {
2889                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2890                 return -ENODEV;
2891         }
2892
2893         dev = &rte_eth_devices[port_id];
2894         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->remove_syn_filter, -ENOTSUP);
2895         return (*dev->dev_ops->remove_syn_filter)(dev);
2896 }
2897
2898 int
2899 rte_eth_dev_get_syn_filter(uint8_t port_id,
2900                         struct rte_syn_filter *filter, uint16_t *rx_queue)
2901 {
2902         struct rte_eth_dev *dev;
2903
2904         if (filter == NULL || rx_queue == NULL)
2905                 return -EINVAL;
2906
2907         if (port_id >= nb_ports) {
2908                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2909                 return -ENODEV;
2910         }
2911
2912         dev = &rte_eth_devices[port_id];
2913         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_syn_filter, -ENOTSUP);
2914         return (*dev->dev_ops->get_syn_filter)(dev, filter, rx_queue);
2915 }
2916
2917 int
2918 rte_eth_dev_add_ethertype_filter(uint8_t port_id, uint16_t index,
2919                         struct rte_ethertype_filter *filter, uint16_t rx_queue)
2920 {
2921         struct rte_eth_dev *dev;
2922
2923         if (port_id >= nb_ports) {
2924                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2925                 return -ENODEV;
2926         }
2927         if (filter->ethertype == ETHER_TYPE_IPv4 ||
2928                 filter->ethertype == ETHER_TYPE_IPv6){
2929                 PMD_DEBUG_TRACE("IP and IPv6 are not supported"
2930                         " in ethertype filter\n");
2931                 return -EINVAL;
2932         }
2933         dev = &rte_eth_devices[port_id];
2934         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->add_ethertype_filter, -ENOTSUP);
2935         return (*dev->dev_ops->add_ethertype_filter)(dev, index,
2936                                         filter, rx_queue);
2937 }
2938
2939 int
2940 rte_eth_dev_remove_ethertype_filter(uint8_t port_id,  uint16_t index)
2941 {
2942         struct rte_eth_dev *dev;
2943
2944         if (port_id >= nb_ports) {
2945                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2946                 return -ENODEV;
2947         }
2948
2949         dev = &rte_eth_devices[port_id];
2950         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->remove_ethertype_filter, -ENOTSUP);
2951         return (*dev->dev_ops->remove_ethertype_filter)(dev, index);
2952 }
2953
2954 int
2955 rte_eth_dev_get_ethertype_filter(uint8_t port_id, uint16_t index,
2956                         struct rte_ethertype_filter *filter, uint16_t *rx_queue)
2957 {
2958         struct rte_eth_dev *dev;
2959
2960         if (filter == NULL || rx_queue == NULL)
2961                 return -EINVAL;
2962
2963         if (port_id >= nb_ports) {
2964                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2965                 return -ENODEV;
2966         }
2967
2968         dev = &rte_eth_devices[port_id];
2969         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_ethertype_filter, -ENOTSUP);
2970         return (*dev->dev_ops->get_ethertype_filter)(dev, index,
2971                                                 filter, rx_queue);
2972 }
2973
2974 int
2975 rte_eth_dev_add_2tuple_filter(uint8_t port_id, uint16_t index,
2976                         struct rte_2tuple_filter *filter, uint16_t rx_queue)
2977 {
2978         struct rte_eth_dev *dev;
2979
2980         if (port_id >= nb_ports) {
2981                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2982                 return -ENODEV;
2983         }
2984         if (filter->protocol != IPPROTO_TCP &&
2985                 filter->tcp_flags != 0){
2986                 PMD_DEBUG_TRACE("tcp flags is 0x%x, but the protocol value"
2987                         " is not TCP\n",
2988                         filter->tcp_flags);
2989                 return -EINVAL;
2990         }
2991
2992         dev = &rte_eth_devices[port_id];
2993         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->add_2tuple_filter, -ENOTSUP);
2994         return (*dev->dev_ops->add_2tuple_filter)(dev, index, filter, rx_queue);
2995 }
2996
2997 int
2998 rte_eth_dev_remove_2tuple_filter(uint8_t port_id, uint16_t index)
2999 {
3000         struct rte_eth_dev *dev;
3001
3002         if (port_id >= nb_ports) {
3003                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3004                 return -ENODEV;
3005         }
3006
3007         dev = &rte_eth_devices[port_id];
3008         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->remove_2tuple_filter, -ENOTSUP);
3009         return (*dev->dev_ops->remove_2tuple_filter)(dev, index);
3010 }
3011
3012 int
3013 rte_eth_dev_get_2tuple_filter(uint8_t port_id, uint16_t index,
3014                         struct rte_2tuple_filter *filter, uint16_t *rx_queue)
3015 {
3016         struct rte_eth_dev *dev;
3017
3018         if (filter == NULL || rx_queue == NULL)
3019                 return -EINVAL;
3020
3021         if (port_id >= nb_ports) {
3022                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3023                 return -ENODEV;
3024         }
3025
3026         dev = &rte_eth_devices[port_id];
3027         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_2tuple_filter, -ENOTSUP);
3028         return (*dev->dev_ops->get_2tuple_filter)(dev, index, filter, rx_queue);
3029 }
3030
3031 int
3032 rte_eth_dev_add_5tuple_filter(uint8_t port_id, uint16_t index,
3033                         struct rte_5tuple_filter *filter, uint16_t rx_queue)
3034 {
3035         struct rte_eth_dev *dev;
3036
3037         if (port_id >= nb_ports) {
3038                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3039                 return -ENODEV;
3040         }
3041
3042         if (filter->protocol != IPPROTO_TCP &&
3043                 filter->tcp_flags != 0){
3044                 PMD_DEBUG_TRACE("tcp flags is 0x%x, but the protocol value"
3045                         " is not TCP\n",
3046                         filter->tcp_flags);
3047                 return -EINVAL;
3048         }
3049
3050         dev = &rte_eth_devices[port_id];
3051         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->add_5tuple_filter, -ENOTSUP);
3052         return (*dev->dev_ops->add_5tuple_filter)(dev, index, filter, rx_queue);
3053 }
3054
3055 int
3056 rte_eth_dev_remove_5tuple_filter(uint8_t port_id, uint16_t index)
3057 {
3058         struct rte_eth_dev *dev;
3059
3060         if (port_id >= nb_ports) {
3061                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3062                 return -ENODEV;
3063         }
3064
3065         dev = &rte_eth_devices[port_id];
3066         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->remove_5tuple_filter, -ENOTSUP);
3067         return (*dev->dev_ops->remove_5tuple_filter)(dev, index);
3068 }
3069
3070 int
3071 rte_eth_dev_get_5tuple_filter(uint8_t port_id, uint16_t index,
3072                         struct rte_5tuple_filter *filter, uint16_t *rx_queue)
3073 {
3074         struct rte_eth_dev *dev;
3075
3076         if (filter == NULL || rx_queue == NULL)
3077                 return -EINVAL;
3078
3079         if (port_id >= nb_ports) {
3080                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3081                 return -ENODEV;
3082         }
3083
3084         dev = &rte_eth_devices[port_id];
3085         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_5tuple_filter, -ENOTSUP);
3086         return (*dev->dev_ops->get_5tuple_filter)(dev, index, filter,
3087                                                 rx_queue);
3088 }
3089
3090 int
3091 rte_eth_dev_add_flex_filter(uint8_t port_id, uint16_t index,
3092                         struct rte_flex_filter *filter, uint16_t rx_queue)
3093 {
3094         struct rte_eth_dev *dev;
3095
3096         if (port_id >= nb_ports) {
3097                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3098                 return -ENODEV;
3099         }
3100
3101         dev = &rte_eth_devices[port_id];
3102         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->add_flex_filter, -ENOTSUP);
3103         return (*dev->dev_ops->add_flex_filter)(dev, index, filter, rx_queue);
3104 }
3105
3106 int
3107 rte_eth_dev_remove_flex_filter(uint8_t port_id, uint16_t index)
3108 {
3109         struct rte_eth_dev *dev;
3110
3111         if (port_id >= nb_ports) {
3112                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3113                 return -ENODEV;
3114         }
3115
3116         dev = &rte_eth_devices[port_id];
3117         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->remove_flex_filter, -ENOTSUP);
3118         return (*dev->dev_ops->remove_flex_filter)(dev, index);
3119 }
3120
3121 int
3122 rte_eth_dev_get_flex_filter(uint8_t port_id, uint16_t index,
3123                         struct rte_flex_filter *filter, uint16_t *rx_queue)
3124 {
3125         struct rte_eth_dev *dev;
3126
3127         if (filter == NULL || rx_queue == NULL)
3128                 return -EINVAL;
3129
3130         if (port_id >= nb_ports) {
3131                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3132                 return -ENODEV;
3133         }
3134
3135         dev = &rte_eth_devices[port_id];
3136         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_flex_filter, -ENOTSUP);
3137         return (*dev->dev_ops->get_flex_filter)(dev, index, filter,
3138                                                 rx_queue);
3139 }