ethdev: add link state interrupt flag
[dpdk.git] / lib / librte_ether / rte_ethdev.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/types.h>
35 #include <sys/queue.h>
36 #include <ctype.h>
37 #include <stdio.h>
38 #include <stdlib.h>
39 #include <string.h>
40 #include <stdarg.h>
41 #include <errno.h>
42 #include <stdint.h>
43 #include <inttypes.h>
44 #include <netinet/in.h>
45
46 #include <rte_byteorder.h>
47 #include <rte_log.h>
48 #include <rte_debug.h>
49 #include <rte_interrupts.h>
50 #include <rte_pci.h>
51 #include <rte_memory.h>
52 #include <rte_memcpy.h>
53 #include <rte_memzone.h>
54 #include <rte_launch.h>
55 #include <rte_tailq.h>
56 #include <rte_eal.h>
57 #include <rte_per_lcore.h>
58 #include <rte_lcore.h>
59 #include <rte_atomic.h>
60 #include <rte_branch_prediction.h>
61 #include <rte_common.h>
62 #include <rte_ring.h>
63 #include <rte_mempool.h>
64 #include <rte_malloc.h>
65 #include <rte_mbuf.h>
66 #include <rte_errno.h>
67 #include <rte_spinlock.h>
68
69 #include "rte_ether.h"
70 #include "rte_ethdev.h"
71
/*
 * Debug trace macro: logs at ERR level on the PMD logtype, prefixed with
 * the calling function's name.  Compiles to nothing unless
 * RTE_LIBRTE_ETHDEV_DEBUG is defined, so release builds pay no cost.
 */
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
#define PMD_DEBUG_TRACE(fmt, args...) do {                        \
		RTE_LOG(ERR, PMD, "%s: " fmt, __func__, ## args); \
	} while (0)
#else
#define PMD_DEBUG_TRACE(fmt, args...)
#endif

/* Macros restricting functions to the primary process only: hardware
 * configuration must not be performed from secondary processes in a
 * multi-process deployment. */
#define PROC_PRIMARY_OR_ERR_RET(retval) do { \
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
		PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
		return (retval); \
	} \
} while(0)
#define PROC_PRIMARY_OR_RET() do { \
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
		PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
		return; \
	} \
} while(0)

/* Macros to check for invalid (NULL) function pointers in a PMD's
 * dev_ops structure before dereferencing them. */
#define FUNC_PTR_OR_ERR_RET(func, retval) do { \
	if ((func) == NULL) { \
		PMD_DEBUG_TRACE("Function not supported\n"); \
		return (retval); \
	} \
} while(0)
#define FUNC_PTR_OR_RET(func) do { \
	if ((func) == NULL) { \
		PMD_DEBUG_TRACE("Function not supported\n"); \
		return; \
	} \
} while(0)
107
/* Name of the memzone holding per-port data shared between the primary
 * and secondary processes. */
static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
/* Per-port device structures; process-local (function pointers differ
 * between processes, so these cannot live in shared memory). */
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
/* Per-port data array; points into the shared memzone once
 * rte_eth_dev_data_alloc() has run. */
static struct rte_eth_dev_data *rte_eth_dev_data = NULL;
/* Number of ports allocated so far via rte_eth_dev_allocate(). */
static uint8_t nb_ports = 0;

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/**
 * The user application callback description.
 *
 * It contains callback address to be registered by user application,
 * the pointer to the parameters for callback, and the event type.
 */
struct rte_eth_dev_callback {
	TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
	rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
	void *cb_arg;                           /**< Parameter for callback */
	enum rte_eth_event_type event;          /**< Interrupt event type */
	uint32_t active;                        /**< Callback is executing */
};

/* Selector tags distinguishing TX from RX in per-queue statistics
 * mapping — presumably consumed by the stats-mapping helpers later in
 * this file (not visible in this chunk). */
enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};
134
135 static inline void
136 rte_eth_dev_data_alloc(void)
137 {
138         const unsigned flags = 0;
139         const struct rte_memzone *mz;
140
141         if (rte_eal_process_type() == RTE_PROC_PRIMARY){
142                 mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
143                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
144                                 rte_socket_id(), flags);
145         } else
146                 mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
147         if (mz == NULL)
148                 rte_panic("Cannot allocate memzone for ethernet port data\n");
149
150         rte_eth_dev_data = mz->addr;
151         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
152                 memset(rte_eth_dev_data, 0,
153                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
154 }
155
156 struct rte_eth_dev *
157 rte_eth_dev_allocate(void)
158 {
159         struct rte_eth_dev *eth_dev;
160
161         if (nb_ports == RTE_MAX_ETHPORTS) {
162                 PMD_DEBUG_TRACE("Reached maximum number of ethernet ports\n");
163                 return NULL;
164         }
165
166         if (rte_eth_dev_data == NULL)
167                 rte_eth_dev_data_alloc();
168
169         eth_dev = &rte_eth_devices[nb_ports];
170         eth_dev->data = &rte_eth_dev_data[nb_ports];
171         eth_dev->data->port_id = nb_ports++;
172         return eth_dev;
173 }
174
175 static int
176 rte_eth_dev_init(struct rte_pci_driver *pci_drv,
177                  struct rte_pci_device *pci_dev)
178 {
179         struct eth_driver    *eth_drv;
180         struct rte_eth_dev *eth_dev;
181         int diag;
182
183         eth_drv = (struct eth_driver *)pci_drv;
184
185         eth_dev = rte_eth_dev_allocate();
186         if (eth_dev == NULL)
187                 return -ENOMEM;
188
189         if (rte_eal_process_type() == RTE_PROC_PRIMARY){
190                 eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
191                                   eth_drv->dev_private_size,
192                                   CACHE_LINE_SIZE);
193                 if (eth_dev->data->dev_private == NULL)
194                         rte_panic("Cannot allocate memzone for private port data\n");
195         }
196         eth_dev->pci_dev = pci_dev;
197         eth_dev->driver = eth_drv;
198         eth_dev->data->rx_mbuf_alloc_failed = 0;
199
200         /* init user callbacks */
201         TAILQ_INIT(&(eth_dev->callbacks));
202
203         /*
204          * Set the default MTU.
205          */
206         eth_dev->data->mtu = ETHER_MTU;
207
208         /* Invoke PMD device initialization function */
209         diag = (*eth_drv->eth_dev_init)(eth_drv, eth_dev);
210         if (diag == 0)
211                 return (0);
212
213         PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%u device_id=0x%x)"
214                         " failed\n", pci_drv->name,
215                         (unsigned) pci_dev->id.vendor_id,
216                         (unsigned) pci_dev->id.device_id);
217         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
218                 rte_free(eth_dev->data->dev_private);
219         nb_ports--;
220         return diag;
221 }
222
/**
 * Register an Ethernet [Poll Mode] driver.
 *
 * Function invoked by the initialization function of an Ethernet driver
 * to simultaneously register itself as a PCI driver and as an Ethernet
 * Poll Mode Driver.
 * Invokes the rte_eal_pci_register() function to register the *pci_drv*
 * structure embedded in the *eth_drv* structure, after having stored the
 * address of the rte_eth_dev_init() function in the *devinit* field of
 * the *pci_drv* structure.
 * During the PCI probing phase, the rte_eth_dev_init() function is
 * invoked for each PCI [Ethernet device] matching the embedded PCI
 * identifiers provided by the driver.
 */
void
rte_eth_driver_register(struct eth_driver *eth_drv)
{
	/* Hook our generic init so probing creates an ethdev port. */
	eth_drv->pci_drv.devinit = rte_eth_dev_init;
	rte_eal_pci_register(&eth_drv->pci_drv);
}
243
244 int
245 rte_eth_dev_socket_id(uint8_t port_id)
246 {
247         if (port_id >= nb_ports)
248                 return -1;
249         return rte_eth_devices[port_id].pci_dev->numa_node;
250 }
251
252 uint8_t
253 rte_eth_dev_count(void)
254 {
255         return (nb_ports);
256 }
257
/*
 * Resize the per-port array of RX queue pointers to hold nb_queues
 * entries.  First-time configuration allocates a zeroed array; a
 * re-configuration releases any queues beyond the new count, reallocates
 * the array, and zeroes newly added slots.  Returns 0 or -ENOMEM.
 */
static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_rx_queues;
	void **rxq;
	unsigned i;

	if (dev->data->rx_queues == NULL) { /* first time configuration */
		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
				sizeof(dev->data->rx_queues[0]) * nb_queues,
				CACHE_LINE_SIZE);
		if (dev->data->rx_queues == NULL) {
			dev->data->nb_rx_queues = 0;
			return -(ENOMEM);
		}
	} else { /* re-configure */
		FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

		rxq = dev->data->rx_queues;

		/* When shrinking, release the queues falling off the end
		 * before the array itself is shrunk. */
		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->rx_queue_release)(rxq[i]);
		/* NOTE(review): if nb_queues == 0 and rte_realloc returns
		 * NULL, this reports -ENOMEM after the queues were already
		 * released, leaving dev->data->rx_queues stale — confirm
		 * rte_realloc's zero-size behavior before relying on the
		 * (dev, 0) rollback path. */
		rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
				CACHE_LINE_SIZE);
		if (rxq == NULL)
			return -(ENOMEM);

		/* When growing, zero the newly added slots. */
		if (nb_queues > old_nb_queues)
			memset(rxq + old_nb_queues, 0,
				sizeof(rxq[0]) * (nb_queues - old_nb_queues));

		dev->data->rx_queues = rxq;

	}
	dev->data->nb_rx_queues = nb_queues;
	return (0);
}
295
296 int
297 rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
298 {
299         struct rte_eth_dev *dev;
300
301         /* This function is only safe when called from the primary process
302          * in a multi-process setup*/
303         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
304
305         if (port_id >= nb_ports) {
306                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
307                 return -EINVAL;
308         }
309
310         dev = &rte_eth_devices[port_id];
311         if (rx_queue_id >= dev->data->nb_rx_queues) {
312                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
313                 return -EINVAL;
314         }
315
316         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
317
318         return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
319
320 }
321
322 int
323 rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
324 {
325         struct rte_eth_dev *dev;
326
327         /* This function is only safe when called from the primary process
328          * in a multi-process setup*/
329         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
330
331         if (port_id >= nb_ports) {
332                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
333                 return -EINVAL;
334         }
335
336         dev = &rte_eth_devices[port_id];
337         if (rx_queue_id >= dev->data->nb_rx_queues) {
338                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
339                 return -EINVAL;
340         }
341
342         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
343
344         return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
345
346 }
347
348 int
349 rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
350 {
351         struct rte_eth_dev *dev;
352
353         /* This function is only safe when called from the primary process
354          * in a multi-process setup*/
355         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
356
357         if (port_id >= nb_ports) {
358                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
359                 return -EINVAL;
360         }
361
362         dev = &rte_eth_devices[port_id];
363         if (tx_queue_id >= dev->data->nb_tx_queues) {
364                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
365                 return -EINVAL;
366         }
367
368         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
369
370         return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
371
372 }
373
374 int
375 rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
376 {
377         struct rte_eth_dev *dev;
378
379         /* This function is only safe when called from the primary process
380          * in a multi-process setup*/
381         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
382
383         if (port_id >= nb_ports) {
384                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
385                 return -EINVAL;
386         }
387
388         dev = &rte_eth_devices[port_id];
389         if (tx_queue_id >= dev->data->nb_tx_queues) {
390                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
391                 return -EINVAL;
392         }
393
394         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
395
396         return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
397
398 }
399
/*
 * Resize the per-port array of TX queue pointers to hold nb_queues
 * entries.  Mirrors rte_eth_dev_rx_queue_config(): first-time
 * configuration allocates a zeroed array; re-configuration releases
 * queues beyond the new count, reallocates, and zeroes new slots.
 * Returns 0 or -ENOMEM.
 */
static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_tx_queues;
	void **txq;
	unsigned i;

	if (dev->data->tx_queues == NULL) { /* first time configuration */
		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
				sizeof(dev->data->tx_queues[0]) * nb_queues,
				CACHE_LINE_SIZE);
		if (dev->data->tx_queues == NULL) {
			dev->data->nb_tx_queues = 0;
			return -(ENOMEM);
		}
	} else { /* re-configure */
		FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

		txq = dev->data->tx_queues;

		/* When shrinking, release the queues falling off the end
		 * before the array itself is shrunk. */
		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->tx_queue_release)(txq[i]);
		/* NOTE(review): same zero-size rte_realloc caveat as the RX
		 * variant — confirm before relying on the (dev, 0) path. */
		txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
				CACHE_LINE_SIZE);
		if (txq == NULL)
			return -(ENOMEM);

		/* When growing, zero the newly added slots. */
		if (nb_queues > old_nb_queues)
			memset(txq + old_nb_queues, 0,
				sizeof(txq[0]) * (nb_queues - old_nb_queues));

		dev->data->tx_queues = txq;

	}
	dev->data->nb_tx_queues = nb_queues;
	return (0);
}
437
/*
 * Validate the requested multi-queue (RSS/DCB/VMDQ) configuration
 * against the device's capabilities and the SRIOV state.
 *
 * With SRIOV active, only VMDQ-compatible modes are permitted and the
 * queue counts must fit within the per-pool quota; note this function
 * also MUTATES dev->data->dev_conf (forcing VMDQ_ONLY defaults) and the
 * SRIOV nb_q_per_pool as side effects.  Without SRIOV, the VMDQ+DCB and
 * DCB modes require exact queue counts and pool/TC counts.
 *
 * Returns 0 if the configuration is acceptable, -EINVAL otherwise.
 */
static int
rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];

	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
		/* check multi-queue mode */
		if ((dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ||
		    (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) ||
		    (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB_RSS) ||
		    (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB)) {
			/* SRIOV only works in VMDq enable mode */
			PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
					" SRIOV active, "
					"wrong VMDQ mq_mode rx %u tx %u\n",
					port_id,
					dev_conf->rxmode.mq_mode,
					dev_conf->txmode.mq_mode);
			return (-EINVAL);
		}

		switch (dev_conf->rxmode.mq_mode) {
		case ETH_MQ_RX_VMDQ_RSS:
		case ETH_MQ_RX_VMDQ_DCB:
		case ETH_MQ_RX_VMDQ_DCB_RSS:
			/* DCB/RSS VMDQ in SRIOV mode: not implemented yet */
			PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
					" SRIOV active, "
					"unsupported VMDQ mq_mode rx %u\n",
					port_id, dev_conf->rxmode.mq_mode);
			return (-EINVAL);
		default: /* ETH_MQ_RX_VMDQ_ONLY or ETH_MQ_RX_NONE */
			/* if no mq mode configured, use default scheme */
			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
			if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
				RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
			break;
		}

		switch (dev_conf->txmode.mq_mode) {
		case ETH_MQ_TX_VMDQ_DCB:
			/* DCB VMDQ in SRIOV mode: not implemented yet */
			PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
					" SRIOV active, "
					"unsupported VMDQ mq_mode tx %u\n",
					port_id, dev_conf->txmode.mq_mode);
			return (-EINVAL);
		default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
			/* if no mq mode configured, use default scheme */
			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
			if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
				RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
			break;
		}

		/* check valid queue number */
		if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
		    (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
			PMD_DEBUG_TRACE("ethdev port_id=%d SRIOV active, "
				    "queue number must less equal to %d\n",
					port_id, RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
			return (-EINVAL);
		}
	} else {
		/* For VMDQ+DCB mode check our configuration before we go further */
		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
			const struct rte_eth_vmdq_dcb_conf *conf;

			if (nb_rx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
				PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_rx_q "
						"!= %d\n",
						port_id, ETH_VMDQ_DCB_NUM_QUEUES);
				return (-EINVAL);
			}
			conf = &(dev_conf->rx_adv_conf.vmdq_dcb_conf);
			if (! (conf->nb_queue_pools == ETH_16_POOLS ||
			       conf->nb_queue_pools == ETH_32_POOLS)) {
				PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
						"nb_queue_pools must be %d or %d\n",
						port_id, ETH_16_POOLS, ETH_32_POOLS);
				return (-EINVAL);
			}
		}
		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
			const struct rte_eth_vmdq_dcb_tx_conf *conf;

			if (nb_tx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
				PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_tx_q "
						"!= %d\n",
						port_id, ETH_VMDQ_DCB_NUM_QUEUES);
				return (-EINVAL);
			}
			conf = &(dev_conf->tx_adv_conf.vmdq_dcb_tx_conf);
			if (! (conf->nb_queue_pools == ETH_16_POOLS ||
			       conf->nb_queue_pools == ETH_32_POOLS)) {
				PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
						"nb_queue_pools != %d or nb_queue_pools "
						"!= %d\n",
						port_id, ETH_16_POOLS, ETH_32_POOLS);
				return (-EINVAL);
			}
		}

		/* For DCB mode check our configuration before we go further */
		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
			const struct rte_eth_dcb_rx_conf *conf;

			if (nb_rx_q != ETH_DCB_NUM_QUEUES) {
				PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_rx_q "
						"!= %d\n",
						port_id, ETH_DCB_NUM_QUEUES);
				return (-EINVAL);
			}
			conf = &(dev_conf->rx_adv_conf.dcb_rx_conf);
			if (! (conf->nb_tcs == ETH_4_TCS ||
			       conf->nb_tcs == ETH_8_TCS)) {
				PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
						"nb_tcs != %d or nb_tcs "
						"!= %d\n",
						port_id, ETH_4_TCS, ETH_8_TCS);
				return (-EINVAL);
			}
		}

		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
			const struct rte_eth_dcb_tx_conf *conf;

			if (nb_tx_q != ETH_DCB_NUM_QUEUES) {
				PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_tx_q "
						"!= %d\n",
						port_id, ETH_DCB_NUM_QUEUES);
				return (-EINVAL);
			}
			conf = &(dev_conf->tx_adv_conf.dcb_tx_conf);
			if (! (conf->nb_tcs == ETH_4_TCS ||
			       conf->nb_tcs == ETH_8_TCS)) {
				PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
						"nb_tcs != %d or nb_tcs "
						"!= %d\n",
						port_id, ETH_4_TCS, ETH_8_TCS);
				return (-EINVAL);
			}
		}
	}
	return 0;
}
585
/*
 * Configure an Ethernet port: validate queue counts against device
 * capabilities, copy the configuration into dev->data, check link-state
 * interrupt support and jumbo-frame limits, validate the multi-queue
 * mode, size the RX/TX queue arrays, and finally call the PMD's
 * dev_configure.  On a PMD failure the queue arrays are rolled back to
 * zero queues.  Must be called from the primary process, with the port
 * stopped.  Returns 0 or a negative errno-style value.
 */
int
rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	int diag;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup*/
	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

	if (port_id >= nb_ports || port_id >= RTE_MAX_ETHPORTS) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-EINVAL);
	}
	dev = &rte_eth_devices[port_id];

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	/* Reconfiguring a running port is not allowed. */
	if (dev->data->dev_started) {
		PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n", port_id);
		return (-EBUSY);
	}

	/*
	 * Check that the numbers of RX and TX queues are not greater
	 * than the maximum number of RX and TX queues supported by the
	 * configured device.
	 */
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);
	if (nb_rx_q > dev_info.max_rx_queues) {
		PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
				port_id, nb_rx_q, dev_info.max_rx_queues);
		return (-EINVAL);
	}
	if (nb_rx_q == 0) {
		PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_q == 0\n", port_id);
		return (-EINVAL);
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
				port_id, nb_tx_q, dev_info.max_tx_queues);
		return (-EINVAL);
	}
	if (nb_tx_q == 0) {
		PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_q == 0\n", port_id);
		return (-EINVAL);
	}

	/* Copy the dev_conf parameter into the dev structure */
	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

	/*
	 * If link state interrupt is enabled, check that the
	 * device supports it.
	 */
	if (dev_conf->intr_conf.lsc == 1) {
		const struct rte_pci_driver *pci_drv = &dev->driver->pci_drv;

		if (!(pci_drv->drv_flags & RTE_PCI_DRV_INTR_LSC)) {
			PMD_DEBUG_TRACE("driver %s does not support lsc\n",
					pci_drv->name);
			return (-EINVAL);
		}
	}

	/*
	 * If jumbo frames are enabled, check that the maximum RX packet
	 * length is supported by the configured device.
	 */
	if (dev_conf->rxmode.jumbo_frame == 1) {
		if (dev_conf->rxmode.max_rx_pkt_len >
		    dev_info.max_rx_pktlen) {
			PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
				" > max valid value %u\n",
				port_id,
				(unsigned)dev_conf->rxmode.max_rx_pkt_len,
				(unsigned)dev_info.max_rx_pktlen);
			return (-EINVAL);
		}
		else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
			PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
				" < min valid value %u\n",
				port_id,
				(unsigned)dev_conf->rxmode.max_rx_pkt_len,
				(unsigned)ETHER_MIN_LEN);
			return (-EINVAL);
		}
	} else {
		/* Without jumbo frames, silently clamp an out-of-range
		 * max_rx_pkt_len to the standard Ethernet maximum. */
		if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
			dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
			/* Use default value */
			dev->data->dev_conf.rxmode.max_rx_pkt_len =
							ETHER_MAX_LEN;
	}

	/* multiple queue mode checking */
	diag = rte_eth_dev_check_mq_mode(port_id, nb_rx_q, nb_tx_q, dev_conf);
	if (diag != 0) {
		PMD_DEBUG_TRACE("port%d rte_eth_dev_check_mq_mode = %d\n",
				port_id, diag);
		return diag;
	}

	/*
	 * Setup new number of RX/TX queues and reconfigure device.
	 */
	diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
				port_id, diag);
		return diag;
	}

	diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
				port_id, diag);
		/* Roll back the RX queue array on failure. */
		rte_eth_dev_rx_queue_config(dev, 0);
		return diag;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
				port_id, diag);
		/* Roll back both queue arrays on PMD failure. */
		rte_eth_dev_rx_queue_config(dev, 0);
		rte_eth_dev_tx_queue_config(dev, 0);
		return diag;
	}

	return 0;
}
723
/*
 * Re-apply software-tracked configuration (MAC addresses, promiscuous
 * and allmulticast settings) to the hardware after a device (re)start.
 * Best-effort: a missing mac_addr_add op is logged, not treated as an
 * error.
 */
static void
rte_eth_dev_config_restore(uint8_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct ether_addr addr;
	uint16_t i;
	uint32_t pool = 0;

	dev = &rte_eth_devices[port_id];

	rte_eth_dev_info_get(port_id, &dev_info);

	/* With SRIOV, addresses go into the default VMDQ pool. */
	if (RTE_ETH_DEV_SRIOV(dev).active)
		pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx;

	/* replay MAC address configuration */
	for (i = 0; i < dev_info.max_mac_addrs; i++) {
		addr = dev->data->mac_addrs[i];

		/* skip zero address (unused slot) */
		if (is_zero_ether_addr(&addr))
			continue;

		/* add address to the hardware */
		if  (*dev->dev_ops->mac_addr_add)
			(*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool);
		else {
			PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
					port_id);
			/* exit the loop but not return an error */
			break;
		}
	}

	/* replay promiscuous configuration */
	if (rte_eth_promiscuous_get(port_id) == 1)
		rte_eth_promiscuous_enable(port_id);
	else if (rte_eth_promiscuous_get(port_id) == 0)
		rte_eth_promiscuous_disable(port_id);

	/* replay allmulticast configuration */
	if (rte_eth_allmulticast_get(port_id) == 1)
		rte_eth_allmulticast_enable(port_id);
	else if (rte_eth_allmulticast_get(port_id) == 0)
		rte_eth_allmulticast_disable(port_id);
}
771
/*
 * Start an Ethernet device: invoke the PMD dev_start callback and, on
 * success, replay the cached configuration (MAC addresses, promiscuous
 * and all-multicast state).  Starting an already-started port is a
 * silent no-op that returns 0.
 * Returns 0 on success, or a negative errno (-EINVAL, -ENOTSUP, or
 * the PMD's error code).  Primary process only.
 */
int
rte_eth_dev_start(uint8_t port_id)
{
	struct rte_eth_dev *dev;
	int diag;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup*/
	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%" PRIu8 "\n", port_id);
		return (-EINVAL);
	}
	dev = &rte_eth_devices[port_id];

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
			" already started\n",
			port_id);
		return (0);
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	/* re-apply settings the PMD may have reset during dev_start() */
	rte_eth_dev_config_restore(port_id);

	return 0;
}
807
/*
 * Stop an Ethernet device via the PMD dev_stop callback.  Stopping an
 * already-stopped or invalid port is a silent no-op.  Primary process
 * only.
 */
void
rte_eth_dev_stop(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup*/
	PROC_PRIMARY_OR_RET();

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%" PRIu8 "\n", port_id);
		return;
	}
	dev = &rte_eth_devices[port_id];

	FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
			" already stopped\n",
			port_id);
		return;
	}

	/* clear the started flag before invoking the PMD */
	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_stop)(dev);
}
835
/*
 * Administratively bring the device link up.
 * Returns the PMD's result, -EINVAL for a bad port id, or -ENOTSUP if
 * the PMD does not implement the operation.  Primary process only.
 */
int
rte_eth_dev_set_link_up(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup*/
	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return -EINVAL;
	}
	dev = &rte_eth_devices[port_id];

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
	return (*dev->dev_ops->dev_set_link_up)(dev);
}
854
/*
 * Administratively take the device link down.
 * Returns the PMD's result, -EINVAL for a bad port id, or -ENOTSUP if
 * the PMD does not implement the operation.  Primary process only.
 */
int
rte_eth_dev_set_link_down(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup*/
	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return -EINVAL;
	}
	dev = &rte_eth_devices[port_id];

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
	return (*dev->dev_ops->dev_set_link_down)(dev);
}
873
/*
 * Close the device: mark it stopped and invoke the PMD dev_close
 * callback to release device resources.  Silently ignored for an
 * invalid port id or a PMD without the callback.  Primary process
 * only.
 */
void
rte_eth_dev_close(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup*/
	PROC_PRIMARY_OR_RET();

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return;
	}

	dev = &rte_eth_devices[port_id];

	FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_close)(dev);
}
894
/*
 * Configure one RX queue of a port.
 *
 * Validates the port and queue ids, requires the port to be stopped,
 * and checks that the mbufs in the supplied pool are large enough for
 * the device: the pool's data room minus RTE_PKTMBUF_HEADROOM must be
 * at least the device's min_rx_bufsize.  On success, tracks the
 * smallest mbuf data room seen over all RX queues in
 * dev->data->min_rx_buf_size.
 *
 * Returns 0 on success or a negative errno (-EINVAL, -EBUSY, -ENOSPC,
 * -ENOTSUP, or the PMD's error code).  Primary process only.
 */
int
rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
		       uint16_t nb_rx_desc, unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf,
		       struct rte_mempool *mp)
{
	int ret;
	uint32_t mbp_buf_size;
	struct rte_eth_dev *dev;
	struct rte_pktmbuf_pool_private *mbp_priv;
	struct rte_eth_dev_info dev_info;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup*/
	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-EINVAL);
	}
	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
		return (-EINVAL);
	}

	/* queues may only be reconfigured while the port is stopped */
	if (dev->data->dev_started) {
		PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n", port_id);
		return -EBUSY;
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);

	/*
	 * Check the size of the mbuf data buffer.
	 * This value must be provided in the private data of the memory pool.
	 * First check that the memory pool has a valid private data.
	 */
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);
	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
		PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
				mp->name, (int) mp->private_data_size,
				(int) sizeof(struct rte_pktmbuf_pool_private));
		return (-ENOSPC);
	}
	mbp_priv = rte_mempool_get_priv(mp);
	mbp_buf_size = mbp_priv->mbuf_data_room_size;

	if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
		PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
				"(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
				"=%d)\n",
				mp->name,
				(int)mbp_buf_size,
				(int)(RTE_PKTMBUF_HEADROOM +
				      dev_info.min_rx_bufsize),
				(int)RTE_PKTMBUF_HEADROOM,
				(int)dev_info.min_rx_bufsize);
		return (-EINVAL);
	}

	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
					      socket_id, rx_conf, mp);
	if (!ret) {
		/* remember the smallest RX buffer across all queues */
		if (!dev->data->min_rx_buf_size ||
		    dev->data->min_rx_buf_size > mbp_buf_size)
			dev->data->min_rx_buf_size = mbp_buf_size;
	}

	return ret;
}
968
/*
 * Configure one TX queue of a port.  Validates the port and queue ids
 * and requires the port to be stopped before delegating to the PMD.
 * Returns 0 on success or a negative errno (-EINVAL, -EBUSY, -ENOTSUP,
 * or the PMD's error code).  Primary process only.
 */
int
rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
		       uint16_t nb_tx_desc, unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf)
{
	struct rte_eth_dev *dev;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup*/
	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

	/* NOTE(review): the RTE_MAX_ETHPORTS test looks redundant given
	 * nb_ports, and the RX counterpart checks only nb_ports - confirm */
	if (port_id >= RTE_MAX_ETHPORTS || port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-EINVAL);
	}
	dev = &rte_eth_devices[port_id];
	if (tx_queue_id >= dev->data->nb_tx_queues) {
		PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
		return (-EINVAL);
	}

	/* queues may only be reconfigured while the port is stopped */
	if (dev->data->dev_started) {
		PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n", port_id);
		return -EBUSY;
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
	return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
					       socket_id, tx_conf);
}
1000
/*
 * Enable promiscuous RX mode on a port and record the state in
 * dev->data->promiscuous.  Silently ignored for an invalid port id or
 * a PMD without the callback.
 */
void
rte_eth_promiscuous_enable(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return;
	}
	dev = &rte_eth_devices[port_id];

	FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
	(*dev->dev_ops->promiscuous_enable)(dev);
	/* NOTE(review): the flag is set after the PMD call here but cleared
	 * before the call in the disable counterpart - confirm intended */
	dev->data->promiscuous = 1;
}
1016
/*
 * Disable promiscuous RX mode on a port and record the state in
 * dev->data->promiscuous.  Silently ignored for an invalid port id or
 * a PMD without the callback.
 */
void
rte_eth_promiscuous_disable(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return;
	}
	dev = &rte_eth_devices[port_id];

	FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
	/* flag is cleared before the PMD call */
	dev->data->promiscuous = 0;
	(*dev->dev_ops->promiscuous_disable)(dev);
}
1032
1033 int
1034 rte_eth_promiscuous_get(uint8_t port_id)
1035 {
1036         struct rte_eth_dev *dev;
1037
1038         if (port_id >= nb_ports) {
1039                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1040                 return -1;
1041         }
1042
1043         dev = &rte_eth_devices[port_id];
1044         return dev->data->promiscuous;
1045 }
1046
/*
 * Enable reception of all multicast frames on a port and record the
 * state in dev->data->all_multicast.  Silently ignored for an invalid
 * port id or a PMD without the callback.
 */
void
rte_eth_allmulticast_enable(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return;
	}
	dev = &rte_eth_devices[port_id];

	FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
	(*dev->dev_ops->allmulticast_enable)(dev);
	/* NOTE(review): flag set after the PMD call; disable clears it
	 * before the call - same asymmetry as promiscuous - confirm */
	dev->data->all_multicast = 1;
}
1062
/*
 * Disable reception of all multicast frames on a port and record the
 * state in dev->data->all_multicast.  Silently ignored for an invalid
 * port id or a PMD without the callback.
 */
void
rte_eth_allmulticast_disable(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return;
	}
	dev = &rte_eth_devices[port_id];

	FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
	/* flag is cleared before the PMD call */
	dev->data->all_multicast = 0;
	(*dev->dev_ops->allmulticast_disable)(dev);
}
1078
1079 int
1080 rte_eth_allmulticast_get(uint8_t port_id)
1081 {
1082         struct rte_eth_dev *dev;
1083
1084         if (port_id >= nb_ports) {
1085                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1086                 return -1;
1087         }
1088
1089         dev = &rte_eth_devices[port_id];
1090         return dev->data->all_multicast;
1091 }
1092
/*
 * Atomically snapshot the device link status (a 64-bit struct) into
 * *link using a compare-and-set: the copy succeeds only if the source
 * did not change between the read and the cmpset.
 * Returns 0 on success, -1 if the cmpset lost a race with a writer.
 * NOTE(review): callers here ignore the -1 and do not retry - on a
 * race *link may hold a stale/mixed value; confirm this is acceptable.
 */
static inline int
rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
1106
/*
 * Retrieve the link status of a port into *eth_link.  When link-state
 * change interrupts are enabled (intr_conf.lsc != 0) the cached status
 * maintained by the interrupt handler is read atomically; otherwise the
 * PMD is asked to refresh it, waiting for completion
 * (wait_to_complete = 1).  Silently ignored for an invalid port id.
 */
void
rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
{
	struct rte_eth_dev *dev;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return;
	}
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.intr_conf.lsc != 0)
		rte_eth_dev_atomic_read_link_status(dev, eth_link);
	else {
		FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
		(*dev->dev_ops->link_update)(dev, 1);
		*eth_link = dev->data->dev_link;
	}
}
1126
/*
 * Non-blocking variant of rte_eth_link_get(): when link-state change
 * interrupts are enabled the cached status is read atomically,
 * otherwise the PMD refreshes it without waiting for completion
 * (wait_to_complete = 0).  Silently ignored for an invalid port id.
 */
void
rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
{
	struct rte_eth_dev *dev;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return;
	}
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.intr_conf.lsc != 0)
		rte_eth_dev_atomic_read_link_status(dev, eth_link);
	else {
		FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
		(*dev->dev_ops->link_update)(dev, 0);
		*eth_link = dev->data->dev_link;
	}
}
1146
/*
 * Fill *stats with the port's statistics.  The structure is zeroed
 * first so fields a PMD does not populate read as 0; rx_nombuf is
 * supplied from the ethdev layer's own mbuf-allocation-failure
 * counter.  Silently ignored for an invalid port id or a PMD without
 * the callback.
 */
void
rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
{
	struct rte_eth_dev *dev;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return;
	}
	dev = &rte_eth_devices[port_id];
	memset(stats, 0, sizeof(*stats));

	FUNC_PTR_OR_RET(*dev->dev_ops->stats_get);
	(*dev->dev_ops->stats_get)(dev, stats);
	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
}
1163
/*
 * Reset the port's hardware statistics via the PMD callback.  Silently
 * ignored for an invalid port id or a PMD without the callback.
 */
void
rte_eth_stats_reset(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return;
	}
	dev = &rte_eth_devices[port_id];

	FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
	(*dev->dev_ops->stats_reset)(dev);
}
1178
1179
/*
 * Common helper for the RX/TX queue-to-statistics-counter mapping API:
 * validates the port and delegates to the PMD queue_stats_mapping_set
 * callback.  is_rx selects the direction (STAT_QMAP_RX/STAT_QMAP_TX).
 * Returns 0 on success, -ENODEV for a bad port, -ENOTSUP if the PMD
 * lacks the callback, or the PMD's error code.
 */
static int
set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
		uint8_t is_rx)
{
	struct rte_eth_dev *dev;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return -ENODEV;
	}
	dev = &rte_eth_devices[port_id];

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
	return (*dev->dev_ops->queue_stats_mapping_set)
			(dev, queue_id, stat_idx, is_rx);
}
1196
1197
/*
 * Map a TX queue to a per-queue statistics counter index.  Thin
 * wrapper around set_queue_stats_mapping() with the TX direction.
 */
int
rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
		uint8_t stat_idx)
{
	return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
			STAT_QMAP_TX);
}
1205
1206
/*
 * Map an RX queue to a per-queue statistics counter index.  Thin
 * wrapper around set_queue_stats_mapping() with the RX direction.
 */
int
rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
		uint8_t stat_idx)
{
	return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
			STAT_QMAP_RX);
}
1214
1215
/*
 * Fill *dev_info with the device's capabilities as reported by the
 * PMD dev_infos_get callback, then add the ethdev-layer fields
 * (pci_dev, driver_name).  Offload capability fields and if_index are
 * pre-cleared so PMDs that don't set them report zero.  Silently
 * ignored for an invalid port id or a PMD without the callback.
 */
void
rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
{
	struct rte_eth_dev *dev;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return;
	}
	dev = &rte_eth_devices[port_id];

	/* Default device offload capabilities to zero */
	dev_info->rx_offload_capa = 0;
	dev_info->tx_offload_capa = 0;
	dev_info->if_index = 0;
	FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);
	dev_info->pci_dev = dev->pci_dev;
	if (dev->driver)
		dev_info->driver_name = dev->driver->pci_drv.name;
}
1237
1238 void
1239 rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
1240 {
1241         struct rte_eth_dev *dev;
1242
1243         if (port_id >= nb_ports) {
1244                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1245                 return;
1246         }
1247         dev = &rte_eth_devices[port_id];
1248         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
1249 }
1250
1251
1252 int
1253 rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu)
1254 {
1255         struct rte_eth_dev *dev;
1256
1257         if (port_id >= nb_ports) {
1258                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1259                 return (-ENODEV);
1260         }
1261
1262         dev = &rte_eth_devices[port_id];
1263         *mtu = dev->data->mtu;
1264         return 0;
1265 }
1266
/*
 * Change the port's MTU via the PMD mtu_set callback, updating the
 * cached value in dev->data->mtu only on success.
 * Returns 0 on success, -ENODEV for a bad port, -ENOTSUP if the PMD
 * lacks the callback, or the PMD's error code.
 */
int
rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu)
{
	int ret;
	struct rte_eth_dev *dev;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-ENODEV);
	}

	dev = &rte_eth_devices[port_id];
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);

	ret = (*dev->dev_ops->mtu_set)(dev, mtu);
	if (!ret)
		dev->data->mtu = mtu;

	return ret;
}
1287
/*
 * Add (on != 0) or remove (on == 0) a VLAN id from the port's hardware
 * VLAN filter table.  Requires hw_vlan_filter to have been enabled in
 * the RX mode configuration and a VLAN id in the valid 0-4095 range.
 * Returns 0 on success, -ENODEV/-ENOSYS/-EINVAL/-ENOTSUP on error.
 */
int
rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
{
	struct rte_eth_dev *dev;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-ENODEV);
	}
	dev = &rte_eth_devices[port_id];
	if (! (dev->data->dev_conf.rxmode.hw_vlan_filter)) {
		PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
		return (-ENOSYS);
	}

	if (vlan_id > 4095) {
		PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
				port_id, (unsigned) vlan_id);
		return (-EINVAL);
	}
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
	(*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
	return (0);
}
1312
1313 int
1314 rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on)
1315 {
1316         struct rte_eth_dev *dev;
1317
1318         if (port_id >= nb_ports) {
1319                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1320                 return (-ENODEV);
1321         }
1322
1323         dev = &rte_eth_devices[port_id];
1324         if (rx_queue_id >= dev->data->nb_rx_queues) {
1325                 PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", port_id);
1326                 return (-EINVAL);
1327         }
1328
1329         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
1330         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
1331
1332         return (0);
1333 }
1334
/*
 * Set the outer VLAN TPID (Ethertype used for VLAN tag recognition)
 * via the PMD vlan_tpid_set callback.
 * Returns 0 on success, -ENODEV for a bad port, -ENOTSUP if the PMD
 * lacks the callback.
 */
int
rte_eth_dev_set_vlan_ether_type(uint8_t port_id, uint16_t tpid)
{
	struct rte_eth_dev *dev;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-ENODEV);
	}

	dev = &rte_eth_devices[port_id];
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
	(*dev->dev_ops->vlan_tpid_set)(dev, tpid);

	return (0);
}
1351
/*
 * Apply a new VLAN offload configuration.  offload_mask is a bitmask
 * of ETH_VLAN_STRIP_OFFLOAD / ETH_VLAN_FILTER_OFFLOAD /
 * ETH_VLAN_EXTEND_OFFLOAD.  Each setting is compared against the
 * cached RX-mode configuration; only the settings that actually change
 * are recorded and passed to the PMD as a mask of ETH_VLAN_*_MASK
 * bits.  If nothing changes the PMD is not called.
 * Returns 0 on success, -ENODEV for a bad port, -ENOTSUP if the PMD
 * lacks the vlan_offload_set callback.
 */
int
rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
{
	struct rte_eth_dev *dev;
	int ret = 0;
	int mask = 0;
	int cur, org = 0;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-ENODEV);
	}

	dev = &rte_eth_devices[port_id];

	/*check which option changed by application*/
	cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
	org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
	if (cur != org){
		dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
		mask |= ETH_VLAN_STRIP_MASK;
	}

	cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
	org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
	if (cur != org){
		dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
		mask |= ETH_VLAN_FILTER_MASK;
	}

	cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
	org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
	if (cur != org){
		dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
		mask |= ETH_VLAN_EXTEND_MASK;
	}

	/*no change*/
	if(mask == 0)
		return ret;

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
	(*dev->dev_ops->vlan_offload_set)(dev, mask);

	return ret;
}
1398
1399 int
1400 rte_eth_dev_get_vlan_offload(uint8_t port_id)
1401 {
1402         struct rte_eth_dev *dev;
1403         int ret = 0;
1404
1405         if (port_id >= nb_ports) {
1406                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1407                 return (-ENODEV);
1408         }
1409
1410         dev = &rte_eth_devices[port_id];
1411
1412         if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1413                 ret |= ETH_VLAN_STRIP_OFFLOAD ;
1414
1415         if (dev->data->dev_conf.rxmode.hw_vlan_filter)
1416                 ret |= ETH_VLAN_FILTER_OFFLOAD ;
1417
1418         if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1419                 ret |= ETH_VLAN_EXTEND_OFFLOAD ;
1420
1421         return ret;
1422 }
1423
/*
 * Set (on != 0) or clear (on == 0) the port-based VLAN id (PVID) via
 * the PMD vlan_pvid_set callback.
 * Returns 0 on success, -ENODEV for a bad port, -ENOTSUP if the PMD
 * lacks the callback.
 */
int
rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on)
{
	struct rte_eth_dev *dev;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-ENODEV);
	}
	dev = &rte_eth_devices[port_id];
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
	(*dev->dev_ops->vlan_pvid_set)(dev, pvid, on);

	return 0;
}
1439
/*
 * Add a signature-mode flow director filter steering matching packets
 * to the given RX queue.  Requires fdir_conf.mode to be
 * RTE_FDIR_MODE_SIGNATURE; for SCTP or no-L4 filters the L4 source and
 * destination ports must be zero.
 * Returns 0 on success or -ENODEV/-ENOSYS/-EINVAL/-ENOTSUP, or the
 * PMD's error code.
 */
int
rte_eth_dev_fdir_add_signature_filter(uint8_t port_id,
				      struct rte_fdir_filter *fdir_filter,
				      uint8_t queue)
{
	struct rte_eth_dev *dev;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-ENODEV);
	}

	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
		PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
				port_id, dev->data->dev_conf.fdir_conf.mode);
		return (-ENOSYS);
	}

	if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
	     || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
	    && (fdir_filter->port_src || fdir_filter->port_dst)) {
		PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
				"None l4type, source & destinations ports " \
				"should be null!\n");
		return (-EINVAL);
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_signature_filter, -ENOTSUP);
	return (*dev->dev_ops->fdir_add_signature_filter)(dev, fdir_filter,
								queue);
}
1473
/*
 * Update an existing signature-mode flow director filter, retargeting
 * matches to the given RX queue.  Same preconditions as the add
 * variant: signature FDIR mode and zero L4 ports for SCTP/no-L4
 * filters.
 * Returns 0 on success or -ENODEV/-ENOSYS/-EINVAL/-ENOTSUP, or the
 * PMD's error code.
 */
int
rte_eth_dev_fdir_update_signature_filter(uint8_t port_id,
					 struct rte_fdir_filter *fdir_filter,
					 uint8_t queue)
{
	struct rte_eth_dev *dev;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-ENODEV);
	}

	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
		PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
				port_id, dev->data->dev_conf.fdir_conf.mode);
		return (-ENOSYS);
	}

	if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
	     || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
	    && (fdir_filter->port_src || fdir_filter->port_dst)) {
		PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
				"None l4type, source & destinations ports " \
				"should be null!\n");
		return (-EINVAL);
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_signature_filter, -ENOTSUP);
	return (*dev->dev_ops->fdir_update_signature_filter)(dev, fdir_filter,
								queue);

}
1508
1509 int
1510 rte_eth_dev_fdir_remove_signature_filter(uint8_t port_id,
1511                                          struct rte_fdir_filter *fdir_filter)
1512 {
1513         struct rte_eth_dev *dev;
1514
1515         if (port_id >= nb_ports) {
1516                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1517                 return (-ENODEV);
1518         }
1519
1520         dev = &rte_eth_devices[port_id];
1521
1522         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
1523                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1524                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1525                 return (-ENOSYS);
1526         }
1527
1528         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1529              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1530             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1531                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1532                                 "None l4type source & destinations ports " \
1533                                 "should be null!\n");
1534                 return (-EINVAL);
1535         }
1536
1537         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_signature_filter, -ENOTSUP);
1538         return (*dev->dev_ops->fdir_remove_signature_filter)(dev, fdir_filter);
1539 }
1540
/*
 * Fill *fdir with the flow director status/statistics of a port via
 * the PMD fdir_infos_get callback.  Requires flow director to be
 * configured (fdir_conf.mode != RTE_FDIR_MODE_NONE).
 * Returns 0 on success, -ENODEV/-ENOSYS/-ENOTSUP on error.
 */
int
rte_eth_dev_fdir_get_infos(uint8_t port_id, struct rte_eth_fdir *fdir)
{
	struct rte_eth_dev *dev;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-ENODEV);
	}

	dev = &rte_eth_devices[port_id];
	if (! (dev->data->dev_conf.fdir_conf.mode)) {
		PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
		return (-ENOSYS);
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_infos_get, -ENOTSUP);

	(*dev->dev_ops->fdir_infos_get)(dev, fdir);
	return (0);
}
1562
/*
 * Add a perfect-mode flow director filter identified by soft_id,
 * steering matches to the given RX queue or dropping them (drop != 0).
 * Requires fdir_conf.mode == RTE_FDIR_MODE_PERFECT, zero L4 ports for
 * SCTP/no-L4 filters, and an IPv4 filter (IPv6 not supported in
 * perfect mode here).
 * Returns 0 on success or -ENODEV/-ENOSYS/-EINVAL/-ENOTSUP, or the
 * PMD's error code.
 */
int
rte_eth_dev_fdir_add_perfect_filter(uint8_t port_id,
				    struct rte_fdir_filter *fdir_filter,
				    uint16_t soft_id, uint8_t queue,
				    uint8_t drop)
{
	struct rte_eth_dev *dev;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-ENODEV);
	}

	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
		PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
				port_id, dev->data->dev_conf.fdir_conf.mode);
		return (-ENOSYS);
	}

	if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
	     || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
	    && (fdir_filter->port_src || fdir_filter->port_dst)) {
		PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
				"None l4type, source & destinations ports " \
				"should be null!\n");
		return (-EINVAL);
	}

	/* For now IPv6 is not supported with perfect filter */
	if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
		return (-ENOTSUP);

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_perfect_filter, -ENOTSUP);
	return (*dev->dev_ops->fdir_add_perfect_filter)(dev, fdir_filter,
								soft_id, queue,
								drop);
}
1602
/*
 * Update an existing perfect-mode flow director filter identified by
 * soft_id.  Same preconditions as the add variant: perfect FDIR mode,
 * zero L4 ports for SCTP/no-L4 filters, IPv4 only.
 * Returns 0 on success or -ENODEV/-ENOSYS/-EINVAL/-ENOTSUP, or the
 * PMD's error code.
 */
int
rte_eth_dev_fdir_update_perfect_filter(uint8_t port_id,
				       struct rte_fdir_filter *fdir_filter,
				       uint16_t soft_id, uint8_t queue,
				       uint8_t drop)
{
	struct rte_eth_dev *dev;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-ENODEV);
	}

	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
		PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
				port_id, dev->data->dev_conf.fdir_conf.mode);
		return (-ENOSYS);
	}

	if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
	     || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
	    && (fdir_filter->port_src || fdir_filter->port_dst)) {
		PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
				"None l4type, source & destinations ports " \
				"should be null!\n");
		return (-EINVAL);
	}

	/* For now IPv6 is not supported with perfect filter */
	if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
		return (-ENOTSUP);

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_perfect_filter, -ENOTSUP);
	return (*dev->dev_ops->fdir_update_perfect_filter)(dev, fdir_filter,
							soft_id, queue, drop);
}
1641
/*
 * Remove an existing perfect-mode flow director filter from a port.
 *
 * Performs the same validation as the add/update variants (valid port,
 * perfect FDIR mode, L4 ports consistent with L4 type, no IPv6) before
 * dispatching to the driver. Returns 0 on success or a negative errno
 * value (-ENODEV, -ENOSYS, -EINVAL, -ENOTSUP).
 */
int
rte_eth_dev_fdir_remove_perfect_filter(uint8_t port_id,
				       struct rte_fdir_filter *fdir_filter,
				       uint16_t soft_id)
{
	struct rte_eth_dev *dev;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-ENODEV);
	}

	dev = &rte_eth_devices[port_id];

	/* Perfect filters are only meaningful in perfect FDIR mode. */
	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
		PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
				port_id, dev->data->dev_conf.fdir_conf.mode);
		return (-ENOSYS);
	}

	/* SCTP and "none" L4 types carry no TCP/UDP ports. */
	if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
	     || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
	    && (fdir_filter->port_src || fdir_filter->port_dst)) {
		PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
				"None l4type, source & destinations ports " \
				"should be null!\n");
		return (-EINVAL);
	}

	/* For now IPv6 is not supported with perfect filter */
	if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
		return (-ENOTSUP);

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_perfect_filter, -ENOTSUP);
	return (*dev->dev_ops->fdir_remove_perfect_filter)(dev, fdir_filter,
								soft_id);
}
1679
1680 int
1681 rte_eth_dev_fdir_set_masks(uint8_t port_id, struct rte_fdir_masks *fdir_mask)
1682 {
1683         struct rte_eth_dev *dev;
1684
1685         if (port_id >= nb_ports) {
1686                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1687                 return (-ENODEV);
1688         }
1689
1690         dev = &rte_eth_devices[port_id];
1691         if (! (dev->data->dev_conf.fdir_conf.mode)) {
1692                 PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
1693                 return (-ENOSYS);
1694         }
1695
1696         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_set_masks, -ENOTSUP);
1697         return (*dev->dev_ops->fdir_set_masks)(dev, fdir_mask);
1698 }
1699
1700 int
1701 rte_eth_dev_flow_ctrl_get(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
1702 {
1703         struct rte_eth_dev *dev;
1704
1705         if (port_id >= nb_ports) {
1706                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1707                 return (-ENODEV);
1708         }
1709
1710         dev = &rte_eth_devices[port_id];
1711         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
1712         memset(fc_conf, 0, sizeof(*fc_conf));
1713         return (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf);
1714 }
1715
1716 int
1717 rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
1718 {
1719         struct rte_eth_dev *dev;
1720
1721         if (port_id >= nb_ports) {
1722                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1723                 return (-ENODEV);
1724         }
1725
1726         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
1727                 PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
1728                 return (-EINVAL);
1729         }
1730
1731         dev = &rte_eth_devices[port_id];
1732         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
1733         return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
1734 }
1735
1736 int
1737 rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc_conf)
1738 {
1739         struct rte_eth_dev *dev;
1740
1741         if (port_id >= nb_ports) {
1742                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1743                 return (-ENODEV);
1744         }
1745
1746         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
1747                 PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
1748                 return (-EINVAL);
1749         }
1750
1751         dev = &rte_eth_devices[port_id];
1752         /* High water, low water validation are device specific */
1753         if  (*dev->dev_ops->priority_flow_ctrl_set)
1754                 return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
1755         return (-ENOTSUP);
1756 }
1757
/*
 * Update the RSS redirection table (RETA) of a port.
 *
 * mask_lo/mask_hi select which of the ETH_RSS_RETA_NUM_ENTRIES entries
 * are being updated (low half / high half, one bit per entry). Every
 * selected entry must point to a queue below the device's usable RX
 * queue count. Returns 0 on success or a negative errno value.
 */
int
rte_eth_dev_rss_reta_update(uint8_t port_id, struct rte_eth_rss_reta *reta_conf)
{
	struct rte_eth_dev *dev;
	uint16_t max_rxq;
	uint8_t i,j;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-ENODEV);
	}

	/* Invalid mask bit(s) setting */
	if ((reta_conf->mask_lo == 0) && (reta_conf->mask_hi == 0)) {
		PMD_DEBUG_TRACE("Invalid update mask bits for port=%d\n",port_id);
		return (-EINVAL);
	}

	dev = &rte_eth_devices[port_id];
	/* Clamp the valid queue range to what the hardware RETA can hold. */
	max_rxq = (dev->data->nb_rx_queues <= ETH_RSS_RETA_MAX_QUEUE) ?
		dev->data->nb_rx_queues : ETH_RSS_RETA_MAX_QUEUE;
	/* Validate every entry selected in the low half of the mask. */
	if (reta_conf->mask_lo != 0) {
		for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES/2; i++) {
			if ((reta_conf->mask_lo & (1ULL << i)) &&
				(reta_conf->reta[i] >= max_rxq)) {
				PMD_DEBUG_TRACE("RETA hash index output"
					"configration for port=%d,invalid"
					"queue=%d\n",port_id,reta_conf->reta[i]);

				return (-EINVAL);
			}
		}
	}

	/* Validate the high half; bit i of mask_hi maps to entry
	 * i + ETH_RSS_RETA_NUM_ENTRIES/2. */
	if (reta_conf->mask_hi != 0) {
		for (i = 0; i< ETH_RSS_RETA_NUM_ENTRIES/2; i++) {
			j = (uint8_t)(i + ETH_RSS_RETA_NUM_ENTRIES/2);

			/* Check if the max entry >= 128 */
			if ((reta_conf->mask_hi & (1ULL << i)) &&
				(reta_conf->reta[j] >= max_rxq)) {
				PMD_DEBUG_TRACE("RETA hash index output"
					"configration for port=%d,invalid"
					"queue=%d\n",port_id,reta_conf->reta[j]);

				return (-EINVAL);
			}
		}
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
	return (*dev->dev_ops->reta_update)(dev, reta_conf);
}
1811
1812 int
1813 rte_eth_dev_rss_reta_query(uint8_t port_id, struct rte_eth_rss_reta *reta_conf)
1814 {
1815         struct rte_eth_dev *dev;
1816
1817         if (port_id >= nb_ports) {
1818                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1819                 return (-ENODEV);
1820         }
1821
1822         if((reta_conf->mask_lo == 0) && (reta_conf->mask_hi == 0)) {
1823                 PMD_DEBUG_TRACE("Invalid update mask bits for the port=%d\n",port_id);
1824                 return (-EINVAL);
1825         }
1826
1827         dev = &rte_eth_devices[port_id];
1828         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
1829         return (*dev->dev_ops->reta_query)(dev, reta_conf);
1830 }
1831
1832 int
1833 rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf)
1834 {
1835         struct rte_eth_dev *dev;
1836         uint16_t rss_hash_protos;
1837
1838         if (port_id >= nb_ports) {
1839                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1840                 return (-ENODEV);
1841         }
1842         rss_hash_protos = rss_conf->rss_hf;
1843         if ((rss_hash_protos != 0) &&
1844             ((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) {
1845                 PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%x\n",
1846                                 rss_hash_protos);
1847                 return (-EINVAL);
1848         }
1849         dev = &rte_eth_devices[port_id];
1850         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
1851         return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
1852 }
1853
1854 int
1855 rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
1856                               struct rte_eth_rss_conf *rss_conf)
1857 {
1858         struct rte_eth_dev *dev;
1859
1860         if (port_id >= nb_ports) {
1861                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1862                 return (-ENODEV);
1863         }
1864         dev = &rte_eth_devices[port_id];
1865         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
1866         return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
1867 }
1868
1869 int
1870 rte_eth_led_on(uint8_t port_id)
1871 {
1872         struct rte_eth_dev *dev;
1873
1874         if (port_id >= nb_ports) {
1875                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1876                 return (-ENODEV);
1877         }
1878
1879         dev = &rte_eth_devices[port_id];
1880         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
1881         return ((*dev->dev_ops->dev_led_on)(dev));
1882 }
1883
1884 int
1885 rte_eth_led_off(uint8_t port_id)
1886 {
1887         struct rte_eth_dev *dev;
1888
1889         if (port_id >= nb_ports) {
1890                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1891                 return (-ENODEV);
1892         }
1893
1894         dev = &rte_eth_devices[port_id];
1895         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
1896         return ((*dev->dev_ops->dev_led_off)(dev));
1897 }
1898
1899 /*
1900  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
1901  * an empty spot.
1902  */
1903 static inline int
1904 get_mac_addr_index(uint8_t port_id, struct ether_addr *addr)
1905 {
1906         struct rte_eth_dev_info dev_info;
1907         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1908         unsigned i;
1909
1910         rte_eth_dev_info_get(port_id, &dev_info);
1911
1912         for (i = 0; i < dev_info.max_mac_addrs; i++)
1913                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
1914                         return i;
1915
1916         return -1;
1917 }
1918
/* All-zero MAC address used to mark free slots in the MAC address arrays. */
static struct ether_addr null_mac_addr = {{0, 0, 0, 0, 0, 0}};
1920
/*
 * Add a MAC address to a port and associate it with a VMDq pool.
 *
 * If the address already exists, only the pool bitmap is extended (a
 * no-op when the pool bit is already set). Otherwise the address is
 * stored in the first free slot. Returns 0 on success or a negative
 * errno value (-ENODEV, -EINVAL, -ENOSPC).
 */
int
rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
			uint32_t pool)
{
	struct rte_eth_dev *dev;
	int index;
	uint64_t pool_mask;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-ENODEV);
	}
	dev = &rte_eth_devices[port_id];
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);

	/* The all-zero address marks free slots and cannot be added. */
	if (is_zero_ether_addr(addr)) {
		PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
			port_id);
		return (-EINVAL);
	}
	/* Pool index must fit in the 64-bit per-address pool bitmap. */
	if (pool >= ETH_64_POOLS) {
		PMD_DEBUG_TRACE("pool id must be 0-%d\n",ETH_64_POOLS - 1);
		return (-EINVAL);
	}

	index = get_mac_addr_index(port_id, addr);
	if (index < 0) {
		/* Address not present yet: claim the first free slot. */
		index = get_mac_addr_index(port_id, &null_mac_addr);
		if (index < 0) {
			PMD_DEBUG_TRACE("port %d: MAC address array full\n",
				port_id);
			return (-ENOSPC);
		}
	} else {
		pool_mask = dev->data->mac_pool_sel[index];

		/* Check if both MAC address and pool is alread there, and do nothing */
		if (pool_mask & (1ULL << pool))
			return 0;
	}

	/* Update NIC */
	(*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);

	/* Update address in NIC data structure */
	ether_addr_copy(addr, &dev->data->mac_addrs[index]);

	/* Update pool bitmap in NIC data structure */
	dev->data->mac_pool_sel[index] |= (1ULL << pool);

	return 0;
}
1973
/*
 * Remove a MAC address from a port.
 *
 * The default address (slot 0) cannot be removed; removing an address
 * that is not present is a silent no-op. Returns 0 on success or a
 * negative errno value (-ENODEV, -ENOTSUP, -EADDRINUSE).
 */
int
rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
{
	struct rte_eth_dev *dev;
	int index;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-ENODEV);
	}
	dev = &rte_eth_devices[port_id];
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);

	index = get_mac_addr_index(port_id, addr);
	/* Slot 0 holds the port's default MAC address. */
	if (index == 0) {
		PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
		return (-EADDRINUSE);
	} else if (index < 0)
		return 0;  /* Do nothing if address wasn't found */

	/* Update NIC */
	(*dev->dev_ops->mac_addr_remove)(dev, index);

	/* Update address in NIC data structure */
	ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);

	return 0;
}
2002
/*
 * Enable/disable RX mode bits (rx_mode mask) for a given VF of a port.
 * Returns 0 on success or a negative errno value on error.
 */
int
rte_eth_dev_set_vf_rxmode(uint8_t port_id,  uint16_t vf,
				uint16_t rx_mode, uint8_t on)
{
	uint16_t num_vfs;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("set VF RX mode:Invalid port_id=%d\n",
				port_id);
		return (-ENODEV);
	}

	dev = &rte_eth_devices[port_id];
	rte_eth_dev_info_get(port_id, &dev_info);

	num_vfs = dev_info.max_vfs;
	/* NOTE(review): VF ids are presumably 0..max_vfs-1, so this check
	 * looks off by one (vf == num_vfs passes) — confirm against the
	 * PMDs before changing. */
	if (vf > num_vfs)
	{
		PMD_DEBUG_TRACE("set VF RX mode:invalid VF id %d\n", vf);
		return (-EINVAL);
	}
	/* An empty mode mask would select nothing to toggle. */
	if (rx_mode == 0)
	{
		PMD_DEBUG_TRACE("set VF RX mode:mode mask ca not be zero\n");
		return (-EINVAL);
	}
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP);
	return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on);
}
2034
2035 /*
2036  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2037  * an empty spot.
2038  */
2039 static inline int
2040 get_hash_mac_addr_index(uint8_t port_id, struct ether_addr *addr)
2041 {
2042         struct rte_eth_dev_info dev_info;
2043         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2044         unsigned i;
2045
2046         rte_eth_dev_info_get(port_id, &dev_info);
2047         if (!dev->data->hash_mac_addrs)
2048                 return -1;
2049
2050         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
2051                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
2052                         ETHER_ADDR_LEN) == 0)
2053                         return i;
2054
2055         return -1;
2056 }
2057
/*
 * Add (on != 0) or remove (on == 0) a MAC address in the unicast hash
 * table (UTA) of a port.
 *
 * The shadow copy in dev->data->hash_mac_addrs is only updated when the
 * driver call succeeds. Returns 0 on success or a negative errno value
 * (-ENODEV, -EINVAL, -ENOSPC, -ENOTSUP).
 */
int
rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
				uint8_t on)
{
	int index;
	int ret;
	struct rte_eth_dev *dev;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("unicast hash setting:Invalid port_id=%d\n",
			port_id);
		return (-ENODEV);
	}

	dev = &rte_eth_devices[port_id];
	/* The all-zero address marks free slots and cannot be hashed. */
	if (is_zero_ether_addr(addr)) {
		PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
			port_id);
		return (-EINVAL);
	}

	index = get_hash_mac_addr_index(port_id, addr);
	/* Check if it's already there, and do nothing */
	if ((index >= 0) && (on))
		return 0;

	if (index < 0) {
		/* Removing an address that was never set is an error. */
		if (!on) {
			PMD_DEBUG_TRACE("port %d: the MAC address was not"
				"set in UTA\n", port_id);
			return (-EINVAL);
		}

		/* Adding: claim the first free slot. */
		index = get_hash_mac_addr_index(port_id, &null_mac_addr);
		if (index < 0) {
			PMD_DEBUG_TRACE("port %d: MAC address array full\n",
					port_id);
			return (-ENOSPC);
		}
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
	ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
	if (ret == 0) {
		/* Update address in NIC data structure */
		if (on)
			ether_addr_copy(addr,
					&dev->data->hash_mac_addrs[index]);
		else
			ether_addr_copy(&null_mac_addr,
					&dev->data->hash_mac_addrs[index]);
	}

	return ret;
}
2113
2114 int
2115 rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
2116 {
2117         struct rte_eth_dev *dev;
2118
2119         if (port_id >= nb_ports) {
2120                 PMD_DEBUG_TRACE("unicast hash setting:Invalid port_id=%d\n",
2121                         port_id);
2122                 return (-ENODEV);
2123         }
2124
2125         dev = &rte_eth_devices[port_id];
2126
2127         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
2128         return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
2129 }
2130
/*
 * Enable/disable RX for a given VF of a port.
 * Returns 0 on success or a negative errno value on error.
 */
int
rte_eth_dev_set_vf_rx(uint8_t port_id,uint16_t vf, uint8_t on)
{
	uint16_t num_vfs;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-ENODEV);
	}

	dev = &rte_eth_devices[port_id];
	rte_eth_dev_info_get(port_id, &dev_info);

	num_vfs = dev_info.max_vfs;
	/* NOTE(review): looks off by one — vf == num_vfs passes although
	 * VF ids are presumably 0..max_vfs-1; confirm against the PMDs. */
	if (vf > num_vfs)
	{
		PMD_DEBUG_TRACE("port %d: invalid vf id\n", port_id);
		return (-EINVAL);
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP);
	return (*dev->dev_ops->set_vf_rx)(dev, vf,on);
}
2156
/*
 * Enable/disable TX for a given VF (pool) of a port.
 * Returns 0 on success or a negative errno value on error.
 */
int
rte_eth_dev_set_vf_tx(uint8_t port_id,uint16_t vf, uint8_t on)
{
	uint16_t num_vfs;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("set pool tx:Invalid port_id=%d\n", port_id);
		return (-ENODEV);
	}

	dev = &rte_eth_devices[port_id];
	rte_eth_dev_info_get(port_id, &dev_info);

	num_vfs = dev_info.max_vfs;
	/* NOTE(review): looks off by one — vf == num_vfs passes although
	 * VF ids are presumably 0..max_vfs-1; confirm against the PMDs. */
	if (vf > num_vfs)
	{
		PMD_DEBUG_TRACE("set pool tx:invalid pool id=%d\n", vf);
		return (-EINVAL);
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP);
	return (*dev->dev_ops->set_vf_tx)(dev, vf,on);
}
2182
/*
 * Enable/disable a VLAN filter for the set of VFs selected by vf_mask
 * (one bit per VF/pool). Returns 0 on success or a negative errno value.
 */
int
rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id,
				 uint64_t vf_mask,uint8_t vlan_on)
{
	struct rte_eth_dev *dev;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("VF VLAN filter:invalid port id=%d\n",
				port_id);
		return (-ENODEV);
	}
	dev = &rte_eth_devices[port_id];

	/* VLAN ids are 12-bit values. */
	if(vlan_id > ETHER_MAX_VLAN_ID)
	{
		PMD_DEBUG_TRACE("VF VLAN filter:invalid VLAN id=%d\n",
			vlan_id);
		return (-EINVAL);
	}
	/* An empty VF mask selects no pool to act on. */
	if (vf_mask == 0)
	{
		PMD_DEBUG_TRACE("VF VLAN filter:pool_mask can not be 0\n");
		return (-EINVAL);
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP);
	return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id,
						vf_mask,vlan_on);
}
2212
2213 int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
2214                                         uint16_t tx_rate)
2215 {
2216         struct rte_eth_dev *dev;
2217         struct rte_eth_dev_info dev_info;
2218         struct rte_eth_link link;
2219
2220         if (port_id >= nb_ports) {
2221                 PMD_DEBUG_TRACE("set queue rate limit:invalid port id=%d\n",
2222                                 port_id);
2223                 return -ENODEV;
2224         }
2225
2226         dev = &rte_eth_devices[port_id];
2227         rte_eth_dev_info_get(port_id, &dev_info);
2228         link = dev->data->dev_link;
2229
2230         if (queue_idx > dev_info.max_tx_queues) {
2231                 PMD_DEBUG_TRACE("set queue rate limit:port %d: "
2232                                 "invalid queue id=%d\n", port_id, queue_idx);
2233                 return -EINVAL;
2234         }
2235
2236         if (tx_rate > link.link_speed) {
2237                 PMD_DEBUG_TRACE("set queue rate limit:invalid tx_rate=%d, "
2238                                 "bigger than link speed= %d\n",
2239                         tx_rate, link_speed);
2240                 return -EINVAL;
2241         }
2242
2243         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
2244         return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate);
2245 }
2246
2247 int rte_eth_set_vf_rate_limit(uint8_t port_id, uint16_t vf, uint16_t tx_rate,
2248                                 uint64_t q_msk)
2249 {
2250         struct rte_eth_dev *dev;
2251         struct rte_eth_dev_info dev_info;
2252         struct rte_eth_link link;
2253
2254         if (q_msk == 0)
2255                 return 0;
2256
2257         if (port_id >= nb_ports) {
2258                 PMD_DEBUG_TRACE("set VF rate limit:invalid port id=%d\n",
2259                                 port_id);
2260                 return -ENODEV;
2261         }
2262
2263         dev = &rte_eth_devices[port_id];
2264         rte_eth_dev_info_get(port_id, &dev_info);
2265         link = dev->data->dev_link;
2266
2267         if (vf > dev_info.max_vfs) {
2268                 PMD_DEBUG_TRACE("set VF rate limit:port %d: "
2269                                 "invalid vf id=%d\n", port_id, vf);
2270                 return -EINVAL;
2271         }
2272
2273         if (tx_rate > link.link_speed) {
2274                 PMD_DEBUG_TRACE("set VF rate limit:invalid tx_rate=%d, "
2275                                 "bigger than link speed= %d\n",
2276                                 tx_rate, link_speed);
2277                 return -EINVAL;
2278         }
2279
2280         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rate_limit, -ENOTSUP);
2281         return (*dev->dev_ops->set_vf_rate_limit)(dev, vf, tx_rate, q_msk);
2282 }
2283
2284 int
2285 rte_eth_mirror_rule_set(uint8_t port_id,
2286                         struct rte_eth_vmdq_mirror_conf *mirror_conf,
2287                         uint8_t rule_id, uint8_t on)
2288 {
2289         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2290
2291         if (port_id >= nb_ports) {
2292                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2293                 return (-ENODEV);
2294         }
2295
2296         if (mirror_conf->rule_type_mask == 0) {
2297                 PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
2298                 return (-EINVAL);
2299         }
2300
2301         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
2302                 PMD_DEBUG_TRACE("Invalid dst pool, pool id must"
2303                         "be 0-%d\n",ETH_64_POOLS - 1);
2304                 return (-EINVAL);
2305         }
2306
2307         if ((mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) &&
2308                 (mirror_conf->pool_mask == 0)) {
2309                 PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not"
2310                                 "be 0.\n");
2311                 return (-EINVAL);
2312         }
2313
2314         if(rule_id >= ETH_VMDQ_NUM_MIRROR_RULE)
2315         {
2316                 PMD_DEBUG_TRACE("Invalid rule_id, rule_id must be 0-%d\n",
2317                         ETH_VMDQ_NUM_MIRROR_RULE - 1);
2318                 return (-EINVAL);
2319         }
2320
2321         dev = &rte_eth_devices[port_id];
2322         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
2323
2324         return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
2325 }
2326
2327 int
2328 rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
2329 {
2330         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2331
2332         if (port_id >= nb_ports) {
2333                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2334                 return (-ENODEV);
2335         }
2336
2337         if(rule_id >= ETH_VMDQ_NUM_MIRROR_RULE)
2338         {
2339                 PMD_DEBUG_TRACE("Invalid rule_id, rule_id must be 0-%d\n",
2340                         ETH_VMDQ_NUM_MIRROR_RULE-1);
2341                 return (-EINVAL);
2342         }
2343
2344         dev = &rte_eth_devices[port_id];
2345         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
2346
2347         return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
2348 }
2349
2350 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2351 uint16_t
2352 rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
2353                  struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
2354 {
2355         struct rte_eth_dev *dev;
2356
2357         if (port_id >= nb_ports) {
2358                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2359                 return 0;
2360         }
2361         dev = &rte_eth_devices[port_id];
2362         FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, -ENOTSUP);
2363         if (queue_id >= dev->data->nb_rx_queues) {
2364                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
2365                 return 0;
2366         }
2367         return (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
2368                                                 rx_pkts, nb_pkts);
2369 }
2370
2371 uint16_t
2372 rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
2373                  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2374 {
2375         struct rte_eth_dev *dev;
2376
2377         if (port_id >= nb_ports) {
2378                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2379                 return 0;
2380         }
2381         dev = &rte_eth_devices[port_id];
2382
2383         FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, -ENOTSUP);
2384         if (queue_id >= dev->data->nb_tx_queues) {
2385                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
2386                 return 0;
2387         }
2388         return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id],
2389                                                 tx_pkts, nb_pkts);
2390 }
2391
2392 uint32_t
2393 rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
2394 {
2395         struct rte_eth_dev *dev;
2396
2397         if (port_id >= nb_ports) {
2398                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2399                 return 0;
2400         }
2401         dev = &rte_eth_devices[port_id];
2402         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, -ENOTSUP);
2403         return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
2404 }
2405
/*
 * Debug (non-inlined) variant of rte_eth_rx_descriptor_done(): check
 * whether the descriptor at `offset` in an RX queue has been filled by
 * the hardware. Returns the driver's result or a negative errno value.
 *
 * NOTE(review): unlike rte_eth_rx_burst() above, queue_id is not bounds
 * checked here before indexing rx_queues — confirm callers guarantee it.
 */
int
rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
{
	struct rte_eth_dev *dev;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-ENODEV);
	}
	dev = &rte_eth_devices[port_id];
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
	return (*dev->dev_ops->rx_descriptor_done)( \
		dev->data->rx_queues[queue_id], offset);
}
2420 #endif
2421
/*
 * Register a user callback for an ethdev event on a port.
 *
 * Registering the same (cb_fn, cb_arg, event) triple twice is a no-op.
 * The callback list is protected by rte_eth_dev_cb_lock. Returns 0 on
 * success, -EINVAL on bad arguments, -ENOMEM if allocation fails.
 */
int
rte_eth_dev_callback_register(uint8_t port_id,
			enum rte_eth_event_type event,
			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_callback *user_cb;

	if (!cb_fn)
		return (-EINVAL);
	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-EINVAL);
	}

	dev = &rte_eth_devices[port_id];
	rte_spinlock_lock(&rte_eth_dev_cb_lock);

	/* Look for an identical registration; user_cb is non-NULL after
	 * the loop iff one was found. */
	TAILQ_FOREACH(user_cb, &(dev->callbacks), next) {
		if (user_cb->cb_fn == cb_fn &&
			user_cb->cb_arg == cb_arg &&
			user_cb->event == event) {
			break;
		}
	}

	/* create a new callback. */
	if (user_cb == NULL && (user_cb = rte_zmalloc("INTR_USER_CALLBACK",
			sizeof(struct rte_eth_dev_callback), 0)) != NULL) {
		user_cb->cb_fn = cb_fn;
		user_cb->cb_arg = cb_arg;
		user_cb->event = event;
		TAILQ_INSERT_TAIL(&(dev->callbacks), user_cb, next);
	}

	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
	/* NULL here means the allocation above failed. */
	return ((user_cb == NULL) ? -ENOMEM : 0);
}
2460
/*
 * Unregister user callback(s) for an ethdev event on a port.
 *
 * cb_arg == (void *)-1 acts as a wildcard matching any argument.
 * Callbacks currently executing (active != 0) cannot be removed; in
 * that case -EAGAIN is returned and the caller should retry. Returns 0
 * when all matches were removed, -EINVAL on bad arguments.
 */
int
rte_eth_dev_callback_unregister(uint8_t port_id,
			enum rte_eth_event_type event,
			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_callback *cb, *next;

	if (!cb_fn)
		return (-EINVAL);
	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-EINVAL);
	}

	dev = &rte_eth_devices[port_id];
	rte_spinlock_lock(&rte_eth_dev_cb_lock);

	ret = 0;
	/* Manual iteration (not TAILQ_FOREACH) because entries may be
	 * removed while walking the list. */
	for (cb = TAILQ_FIRST(&dev->callbacks); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb->cb_arg != (void *)-1 &&
				cb->cb_arg != cb_arg))
			continue;

		/*
		 * if this callback is not executing right now,
		 * then remove it.
		 */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->callbacks), cb, next);
			rte_free(cb);
		} else {
			ret = -EAGAIN;
		}
	}

	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
	return (ret);
}
2505
/*
 * Invoke every user callback registered for @event on @dev.
 *
 * Each matching entry is copied into a local struct and invoked with the
 * callback-list lock released, so a callback may itself register or
 * unregister callbacks without deadlocking.  While a callback runs, its
 * list entry is flagged 'active'; rte_eth_dev_callback_unregister()
 * checks that flag and returns -EAGAIN rather than freeing an entry that
 * is still executing.
 */
void
_rte_eth_dev_callback_process(struct rte_eth_dev *dev,
	enum rte_eth_event_type event)
{
	struct rte_eth_dev_callback *cb_lst;
	struct rte_eth_dev_callback dev_cb;

	rte_spinlock_lock(&rte_eth_dev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->callbacks), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		/* snapshot the entry: the list may change once unlocked */
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		rte_spinlock_unlock(&rte_eth_dev_cb_lock);
		dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
						dev_cb.cb_arg);
		rte_spinlock_lock(&rte_eth_dev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
}
2527 #ifdef RTE_NIC_BYPASS
2528 int rte_eth_dev_bypass_init(uint8_t port_id)
2529 {
2530         struct rte_eth_dev *dev;
2531
2532         if (port_id >= nb_ports) {
2533                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2534                 return (-ENODEV);
2535         }
2536
2537         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2538                 PMD_DEBUG_TRACE("Invalid port device\n");
2539                 return (-ENODEV);
2540         }
2541
2542         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP);
2543         (*dev->dev_ops->bypass_init)(dev);
2544         return 0;
2545 }
2546
2547 int
2548 rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state)
2549 {
2550         struct rte_eth_dev *dev;
2551
2552         if (port_id >= nb_ports) {
2553                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2554                 return (-ENODEV);
2555         }
2556
2557         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2558                 PMD_DEBUG_TRACE("Invalid port device\n");
2559                 return (-ENODEV);
2560         }
2561         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
2562         (*dev->dev_ops->bypass_state_show)(dev, state);
2563         return 0;
2564 }
2565
2566 int
2567 rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state)
2568 {
2569         struct rte_eth_dev *dev;
2570
2571         if (port_id >= nb_ports) {
2572                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2573                 return (-ENODEV);
2574         }
2575
2576         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2577                 PMD_DEBUG_TRACE("Invalid port device\n");
2578                 return (-ENODEV);
2579         }
2580
2581         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP);
2582         (*dev->dev_ops->bypass_state_set)(dev, new_state);
2583         return 0;
2584 }
2585
2586 int
2587 rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state)
2588 {
2589         struct rte_eth_dev *dev;
2590
2591         if (port_id >= nb_ports) {
2592                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2593                 return (-ENODEV);
2594         }
2595
2596         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2597                 PMD_DEBUG_TRACE("Invalid port device\n");
2598                 return (-ENODEV);
2599         }
2600
2601         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
2602         (*dev->dev_ops->bypass_event_show)(dev, event, state);
2603         return 0;
2604 }
2605
2606 int
2607 rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state)
2608 {
2609         struct rte_eth_dev *dev;
2610
2611         if (port_id >= nb_ports) {
2612                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2613                 return (-ENODEV);
2614         }
2615
2616         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2617                 PMD_DEBUG_TRACE("Invalid port device\n");
2618                 return (-ENODEV);
2619         }
2620
2621         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP);
2622         (*dev->dev_ops->bypass_event_set)(dev, event, state);
2623         return 0;
2624 }
2625
2626 int
2627 rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout)
2628 {
2629         struct rte_eth_dev *dev;
2630
2631         if (port_id >= nb_ports) {
2632                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2633                 return (-ENODEV);
2634         }
2635
2636         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2637                 PMD_DEBUG_TRACE("Invalid port device\n");
2638                 return (-ENODEV);
2639         }
2640
2641         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP);
2642         (*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout);
2643         return 0;
2644 }
2645
2646 int
2647 rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver)
2648 {
2649         struct rte_eth_dev *dev;
2650
2651         if (port_id >= nb_ports) {
2652                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2653                 return (-ENODEV);
2654         }
2655
2656         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2657                 PMD_DEBUG_TRACE("Invalid port device\n");
2658                 return (-ENODEV);
2659         }
2660
2661         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP);
2662         (*dev->dev_ops->bypass_ver_show)(dev, ver);
2663         return 0;
2664 }
2665
2666 int
2667 rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
2668 {
2669         struct rte_eth_dev *dev;
2670
2671         if (port_id >= nb_ports) {
2672                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2673                 return (-ENODEV);
2674         }
2675
2676         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2677                 PMD_DEBUG_TRACE("Invalid port device\n");
2678                 return (-ENODEV);
2679         }
2680
2681         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP);
2682         (*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout);
2683         return 0;
2684 }
2685
2686 int
2687 rte_eth_dev_bypass_wd_reset(uint8_t port_id)
2688 {
2689         struct rte_eth_dev *dev;
2690
2691         if (port_id >= nb_ports) {
2692                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2693                 return (-ENODEV);
2694         }
2695
2696         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2697                 PMD_DEBUG_TRACE("Invalid port device\n");
2698                 return (-ENODEV);
2699         }
2700
2701         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP);
2702         (*dev->dev_ops->bypass_wd_reset)(dev);
2703         return 0;
2704 }
2705 #endif
2706
2707 int
2708 rte_eth_dev_add_syn_filter(uint8_t port_id,
2709                         struct rte_syn_filter *filter, uint16_t rx_queue)
2710 {
2711         struct rte_eth_dev *dev;
2712
2713         if (port_id >= nb_ports) {
2714                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2715                 return -ENODEV;
2716         }
2717
2718         dev = &rte_eth_devices[port_id];
2719         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->add_syn_filter, -ENOTSUP);
2720         return (*dev->dev_ops->add_syn_filter)(dev, filter, rx_queue);
2721 }
2722
2723 int
2724 rte_eth_dev_remove_syn_filter(uint8_t port_id)
2725 {
2726         struct rte_eth_dev *dev;
2727
2728         if (port_id >= nb_ports) {
2729                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2730                 return -ENODEV;
2731         }
2732
2733         dev = &rte_eth_devices[port_id];
2734         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->remove_syn_filter, -ENOTSUP);
2735         return (*dev->dev_ops->remove_syn_filter)(dev);
2736 }
2737
2738 int
2739 rte_eth_dev_get_syn_filter(uint8_t port_id,
2740                         struct rte_syn_filter *filter, uint16_t *rx_queue)
2741 {
2742         struct rte_eth_dev *dev;
2743
2744         if (filter == NULL || rx_queue == NULL)
2745                 return -EINVAL;
2746
2747         if (port_id >= nb_ports) {
2748                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2749                 return -ENODEV;
2750         }
2751
2752         dev = &rte_eth_devices[port_id];
2753         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_syn_filter, -ENOTSUP);
2754         return (*dev->dev_ops->get_syn_filter)(dev, filter, rx_queue);
2755 }
2756
2757 int
2758 rte_eth_dev_add_ethertype_filter(uint8_t port_id, uint16_t index,
2759                         struct rte_ethertype_filter *filter, uint16_t rx_queue)
2760 {
2761         struct rte_eth_dev *dev;
2762
2763         if (port_id >= nb_ports) {
2764                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2765                 return -ENODEV;
2766         }
2767         if (filter->ethertype == ETHER_TYPE_IPv4 ||
2768                 filter->ethertype == ETHER_TYPE_IPv6){
2769                 PMD_DEBUG_TRACE("IP and IPv6 are not supported"
2770                         " in ethertype filter\n");
2771                 return -EINVAL;
2772         }
2773         dev = &rte_eth_devices[port_id];
2774         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->add_ethertype_filter, -ENOTSUP);
2775         return (*dev->dev_ops->add_ethertype_filter)(dev, index,
2776                                         filter, rx_queue);
2777 }
2778
2779 int
2780 rte_eth_dev_remove_ethertype_filter(uint8_t port_id,  uint16_t index)
2781 {
2782         struct rte_eth_dev *dev;
2783
2784         if (port_id >= nb_ports) {
2785                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2786                 return -ENODEV;
2787         }
2788
2789         dev = &rte_eth_devices[port_id];
2790         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->remove_ethertype_filter, -ENOTSUP);
2791         return (*dev->dev_ops->remove_ethertype_filter)(dev, index);
2792 }
2793
2794 int
2795 rte_eth_dev_get_ethertype_filter(uint8_t port_id, uint16_t index,
2796                         struct rte_ethertype_filter *filter, uint16_t *rx_queue)
2797 {
2798         struct rte_eth_dev *dev;
2799
2800         if (filter == NULL || rx_queue == NULL)
2801                 return -EINVAL;
2802
2803         if (port_id >= nb_ports) {
2804                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2805                 return -ENODEV;
2806         }
2807
2808         dev = &rte_eth_devices[port_id];
2809         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_ethertype_filter, -ENOTSUP);
2810         return (*dev->dev_ops->get_ethertype_filter)(dev, index,
2811                                                 filter, rx_queue);
2812 }
2813
2814 int
2815 rte_eth_dev_add_2tuple_filter(uint8_t port_id, uint16_t index,
2816                         struct rte_2tuple_filter *filter, uint16_t rx_queue)
2817 {
2818         struct rte_eth_dev *dev;
2819
2820         if (port_id >= nb_ports) {
2821                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2822                 return -ENODEV;
2823         }
2824         if (filter->protocol != IPPROTO_TCP &&
2825                 filter->tcp_flags != 0){
2826                 PMD_DEBUG_TRACE("tcp flags is 0x%x, but the protocol value"
2827                         " is not TCP\n",
2828                         filter->tcp_flags);
2829                 return -EINVAL;
2830         }
2831
2832         dev = &rte_eth_devices[port_id];
2833         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->add_2tuple_filter, -ENOTSUP);
2834         return (*dev->dev_ops->add_2tuple_filter)(dev, index, filter, rx_queue);
2835 }
2836
2837 int
2838 rte_eth_dev_remove_2tuple_filter(uint8_t port_id, uint16_t index)
2839 {
2840         struct rte_eth_dev *dev;
2841
2842         if (port_id >= nb_ports) {
2843                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2844                 return -ENODEV;
2845         }
2846
2847         dev = &rte_eth_devices[port_id];
2848         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->remove_2tuple_filter, -ENOTSUP);
2849         return (*dev->dev_ops->remove_2tuple_filter)(dev, index);
2850 }
2851
2852 int
2853 rte_eth_dev_get_2tuple_filter(uint8_t port_id, uint16_t index,
2854                         struct rte_2tuple_filter *filter, uint16_t *rx_queue)
2855 {
2856         struct rte_eth_dev *dev;
2857
2858         if (filter == NULL || rx_queue == NULL)
2859                 return -EINVAL;
2860
2861         if (port_id >= nb_ports) {
2862                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2863                 return -ENODEV;
2864         }
2865
2866         dev = &rte_eth_devices[port_id];
2867         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_2tuple_filter, -ENOTSUP);
2868         return (*dev->dev_ops->get_2tuple_filter)(dev, index, filter, rx_queue);
2869 }
2870
2871 int
2872 rte_eth_dev_add_5tuple_filter(uint8_t port_id, uint16_t index,
2873                         struct rte_5tuple_filter *filter, uint16_t rx_queue)
2874 {
2875         struct rte_eth_dev *dev;
2876
2877         if (port_id >= nb_ports) {
2878                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2879                 return -ENODEV;
2880         }
2881
2882         if (filter->protocol != IPPROTO_TCP &&
2883                 filter->tcp_flags != 0){
2884                 PMD_DEBUG_TRACE("tcp flags is 0x%x, but the protocol value"
2885                         " is not TCP\n",
2886                         filter->tcp_flags);
2887                 return -EINVAL;
2888         }
2889
2890         dev = &rte_eth_devices[port_id];
2891         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->add_5tuple_filter, -ENOTSUP);
2892         return (*dev->dev_ops->add_5tuple_filter)(dev, index, filter, rx_queue);
2893 }
2894
2895 int
2896 rte_eth_dev_remove_5tuple_filter(uint8_t port_id, uint16_t index)
2897 {
2898         struct rte_eth_dev *dev;
2899
2900         if (port_id >= nb_ports) {
2901                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2902                 return -ENODEV;
2903         }
2904
2905         dev = &rte_eth_devices[port_id];
2906         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->remove_5tuple_filter, -ENOTSUP);
2907         return (*dev->dev_ops->remove_5tuple_filter)(dev, index);
2908 }
2909
2910 int
2911 rte_eth_dev_get_5tuple_filter(uint8_t port_id, uint16_t index,
2912                         struct rte_5tuple_filter *filter, uint16_t *rx_queue)
2913 {
2914         struct rte_eth_dev *dev;
2915
2916         if (filter == NULL || rx_queue == NULL)
2917                 return -EINVAL;
2918
2919         if (port_id >= nb_ports) {
2920                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2921                 return -ENODEV;
2922         }
2923
2924         dev = &rte_eth_devices[port_id];
2925         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_5tuple_filter, -ENOTSUP);
2926         return (*dev->dev_ops->get_5tuple_filter)(dev, index, filter,
2927                                                 rx_queue);
2928 }
2929
2930 int
2931 rte_eth_dev_add_flex_filter(uint8_t port_id, uint16_t index,
2932                         struct rte_flex_filter *filter, uint16_t rx_queue)
2933 {
2934         struct rte_eth_dev *dev;
2935
2936         if (port_id >= nb_ports) {
2937                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2938                 return -ENODEV;
2939         }
2940
2941         dev = &rte_eth_devices[port_id];
2942         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->add_flex_filter, -ENOTSUP);
2943         return (*dev->dev_ops->add_flex_filter)(dev, index, filter, rx_queue);
2944 }
2945
2946 int
2947 rte_eth_dev_remove_flex_filter(uint8_t port_id, uint16_t index)
2948 {
2949         struct rte_eth_dev *dev;
2950
2951         if (port_id >= nb_ports) {
2952                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2953                 return -ENODEV;
2954         }
2955
2956         dev = &rte_eth_devices[port_id];
2957         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->remove_flex_filter, -ENOTSUP);
2958         return (*dev->dev_ops->remove_flex_filter)(dev, index);
2959 }
2960
2961 int
2962 rte_eth_dev_get_flex_filter(uint8_t port_id, uint16_t index,
2963                         struct rte_flex_filter *filter, uint16_t *rx_queue)
2964 {
2965         struct rte_eth_dev *dev;
2966
2967         if (filter == NULL || rx_queue == NULL)
2968                 return -EINVAL;
2969
2970         if (port_id >= nb_ports) {
2971                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2972                 return -ENODEV;
2973         }
2974
2975         dev = &rte_eth_devices[port_id];
2976         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_flex_filter, -ENOTSUP);
2977         return (*dev->dev_ops->get_flex_filter)(dev, index, filter,
2978                                                 rx_queue);
2979 }