ethdev: MTU accessors
[dpdk.git] / lib / librte_ether / rte_ethdev.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/types.h>
35 #include <sys/queue.h>
36 #include <ctype.h>
37 #include <stdio.h>
38 #include <stdlib.h>
39 #include <string.h>
40 #include <stdarg.h>
41 #include <errno.h>
42 #include <stdint.h>
43 #include <inttypes.h>
44 #include <netinet/in.h>
45
46 #include <rte_byteorder.h>
47 #include <rte_log.h>
48 #include <rte_debug.h>
49 #include <rte_interrupts.h>
50 #include <rte_pci.h>
51 #include <rte_memory.h>
52 #include <rte_memcpy.h>
53 #include <rte_memzone.h>
54 #include <rte_launch.h>
55 #include <rte_tailq.h>
56 #include <rte_eal.h>
57 #include <rte_per_lcore.h>
58 #include <rte_lcore.h>
59 #include <rte_atomic.h>
60 #include <rte_branch_prediction.h>
61 #include <rte_common.h>
62 #include <rte_ring.h>
63 #include <rte_mempool.h>
64 #include <rte_malloc.h>
65 #include <rte_mbuf.h>
66 #include <rte_errno.h>
67 #include <rte_spinlock.h>
68
69 #include "rte_ether.h"
70 #include "rte_ethdev.h"
71
/* Error-level trace prefixed with the calling function's name; expands to
 * nothing unless the library is built with RTE_LIBRTE_ETHDEV_DEBUG. */
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
#define PMD_DEBUG_TRACE(fmt, args...) do {                        \
		RTE_LOG(ERR, PMD, "%s: " fmt, __func__, ## args); \
	} while (0)
#else
#define PMD_DEBUG_TRACE(fmt, args...)
#endif
79
/* Macros restricting the enclosing function to the primary process only */
#define PROC_PRIMARY_OR_ERR_RET(retval) do { \
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
		PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
		return (retval); \
	} \
} while(0)
/* Same check for functions returning void */
#define PROC_PRIMARY_OR_RET() do { \
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
		PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
		return; \
	} \
} while(0)
93
/* Macros to check for invalid function pointers in dev_ops structure */
#define FUNC_PTR_OR_ERR_RET(func, retval) do { \
	if ((func) == NULL) { \
		PMD_DEBUG_TRACE("Function not supported\n"); \
		return (retval); \
	} \
} while(0)
/* Same check for functions returning void */
#define FUNC_PTR_OR_RET(func) do { \
	if ((func) == NULL) { \
		PMD_DEBUG_TRACE("Function not supported\n"); \
		return; \
	} \
} while(0)
107
/* Name of the shared memzone that carries the per-port data across
 * primary/secondary processes (see rte_eth_dev_data_alloc()). */
static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
/* Global table of ethernet devices, indexed by port id */
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
/* Per-port shared data; lazily mapped on first rte_eth_dev_allocate() */
static struct rte_eth_dev_data *rte_eth_dev_data = NULL;
/* Number of ports allocated so far */
static uint8_t nb_ports = 0;

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
115
/**
 * The user application callback description.
 *
 * It contains callback address to be registered by user application,
 * the pointer to the parameters for callback, and the event type.
 * List membership is guarded by rte_eth_dev_cb_lock; the active flag
 * marks an entry currently being executed so it is not freed under the
 * caller's feet.
 */
struct rte_eth_dev_callback {
	TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
	rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
	void *cb_arg;                           /**< Parameter for callback */
	enum rte_eth_event_type event;          /**< Interrupt event type */
	uint32_t active;                        /**< Callback is executing */
};
129
/* Direction selector for queue-to-statistics counter mapping
 * (presumably consumed by the stats qmap helpers — their definitions are
 * not visible in this chunk; confirm against the full file). */
enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};
134
135 static inline void
136 rte_eth_dev_data_alloc(void)
137 {
138         const unsigned flags = 0;
139         const struct rte_memzone *mz;
140
141         if (rte_eal_process_type() == RTE_PROC_PRIMARY){
142                 mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
143                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
144                                 rte_socket_id(), flags);
145         } else
146                 mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
147         if (mz == NULL)
148                 rte_panic("Cannot allocate memzone for ethernet port data\n");
149
150         rte_eth_dev_data = mz->addr;
151         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
152                 memset(rte_eth_dev_data, 0,
153                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
154 }
155
156 struct rte_eth_dev *
157 rte_eth_dev_allocate(void)
158 {
159         struct rte_eth_dev *eth_dev;
160
161         if (nb_ports == RTE_MAX_ETHPORTS) {
162                 PMD_DEBUG_TRACE("Reached maximum number of ethernet ports\n");
163                 return NULL;
164         }
165
166         if (rte_eth_dev_data == NULL)
167                 rte_eth_dev_data_alloc();
168
169         eth_dev = &rte_eth_devices[nb_ports];
170         eth_dev->data = &rte_eth_dev_data[nb_ports];
171         eth_dev->data->port_id = nb_ports++;
172         return eth_dev;
173 }
174
/*
 * PCI probe callback installed by rte_eth_driver_register().  Allocates a
 * port slot, sets up its shared data (private area, callback list, default
 * MTU) and hands the device to the PMD's own init function.
 *
 * Returns 0 on success, -ENOMEM when no port slot is left, or the PMD's
 * negative error code; on PMD failure the port slot is rolled back.
 * Panics (does not return an error) if the private data allocation fails.
 */
static int
rte_eth_dev_init(struct rte_pci_driver *pci_drv,
		 struct rte_pci_device *pci_dev)
{
	struct eth_driver    *eth_drv;
	struct rte_eth_dev *eth_dev;
	int diag;

	/* pci_drv is the first member of struct eth_driver, so the cast
	 * recovers the enclosing driver structure. */
	eth_drv = (struct eth_driver *)pci_drv;

	eth_dev = rte_eth_dev_allocate();
	if (eth_dev == NULL)
		return -ENOMEM;

	/* Only the primary process allocates the private area; secondary
	 * processes reuse it through the shared data. */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY){
		eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
				  eth_drv->dev_private_size,
				  CACHE_LINE_SIZE);
		if (eth_dev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private port data\n");
	}
	eth_dev->pci_dev = pci_dev;
	eth_dev->driver = eth_drv;
	eth_dev->data->rx_mbuf_alloc_failed = 0;

	/* init user callbacks */
	TAILQ_INIT(&(eth_dev->callbacks));

	/*
	 * Set the default MTU.
	 */
	eth_dev->data->mtu = ETHER_MTU;

	/* Invoke PMD device initialization function */
	diag = (*eth_drv->eth_dev_init)(eth_drv, eth_dev);
	if (diag == 0)
		return (0);

	PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%u device_id=0x%x)"
			" failed\n", pci_drv->name,
			(unsigned) pci_dev->id.vendor_id,
			(unsigned) pci_dev->id.device_id);
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);
	/* Roll back the slot taken by rte_eth_dev_allocate() above. */
	nb_ports--;
	return diag;
}
222
223 /**
224  * Register an Ethernet [Poll Mode] driver.
225  *
226  * Function invoked by the initialization function of an Ethernet driver
227  * to simultaneously register itself as a PCI driver and as an Ethernet
228  * Poll Mode Driver.
229  * Invokes the rte_eal_pci_register() function to register the *pci_drv*
230  * structure embedded in the *eth_drv* structure, after having stored the
231  * address of the rte_eth_dev_init() function in the *devinit* field of
232  * the *pci_drv* structure.
233  * During the PCI probing phase, the rte_eth_dev_init() function is
234  * invoked for each PCI [Ethernet device] matching the embedded PCI
235  * identifiers provided by the driver.
236  */
237 void
238 rte_eth_driver_register(struct eth_driver *eth_drv)
239 {
240         eth_drv->pci_drv.devinit = rte_eth_dev_init;
241         rte_eal_pci_register(&eth_drv->pci_drv);
242 }
243
244 int
245 rte_eth_dev_socket_id(uint8_t port_id)
246 {
247         if (port_id >= nb_ports)
248                 return -1;
249         return rte_eth_devices[port_id].pci_dev->numa_node;
250 }
251
252 uint8_t
253 rte_eth_dev_count(void)
254 {
255         return (nb_ports);
256 }
257
/*
 * Size (or resize) the RX queue pointer array of *dev* to *nb_queues*
 * entries.  On first configuration the array is zero-allocated; on
 * re-configuration queues beyond the new count are released through the
 * PMD's rx_queue_release op, the array is realloc'ed and newly added
 * slots are zeroed.  Returns 0 on success, -ENOMEM on allocation failure
 * or -ENOTSUP when re-configuring and the PMD has no release op.
 *
 * NOTE(review): if rte_realloc() fails after the shrink loop has already
 * released queues, nb_rx_queues keeps the old count and the old array
 * still holds pointers to released queues — verify callers treat the
 * -ENOMEM as fatal for the port.
 */
static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_rx_queues;
	void **rxq;
	unsigned i;

	if (dev->data->rx_queues == NULL) { /* first time configuration */
		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
				sizeof(dev->data->rx_queues[0]) * nb_queues,
				CACHE_LINE_SIZE);
		if (dev->data->rx_queues == NULL) {
			dev->data->nb_rx_queues = 0;
			return -(ENOMEM);
		}
	} else { /* re-configure */
		FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

		rxq = dev->data->rx_queues;

		/* release the queues dropped when shrinking */
		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->rx_queue_release)(rxq[i]);
		rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
				CACHE_LINE_SIZE);
		if (rxq == NULL)
			return -(ENOMEM);

		/* zero-fill the slots added when growing */
		if (nb_queues > old_nb_queues)
			memset(rxq + old_nb_queues, 0,
				sizeof(rxq[0]) * (nb_queues - old_nb_queues));

		dev->data->rx_queues = rxq;

	}
	dev->data->nb_rx_queues = nb_queues;
	return (0);
}
295
296 int
297 rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
298 {
299         struct rte_eth_dev *dev;
300
301         /* This function is only safe when called from the primary process
302          * in a multi-process setup*/
303         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
304
305         if (port_id >= nb_ports) {
306                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
307                 return -EINVAL;
308         }
309
310         dev = &rte_eth_devices[port_id];
311         if (rx_queue_id >= dev->data->nb_rx_queues) {
312                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
313                 return -EINVAL;
314         }
315
316         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
317
318         return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
319
320 }
321
322 int
323 rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
324 {
325         struct rte_eth_dev *dev;
326
327         /* This function is only safe when called from the primary process
328          * in a multi-process setup*/
329         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
330
331         if (port_id >= nb_ports) {
332                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
333                 return -EINVAL;
334         }
335
336         dev = &rte_eth_devices[port_id];
337         if (rx_queue_id >= dev->data->nb_rx_queues) {
338                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
339                 return -EINVAL;
340         }
341
342         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
343
344         return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
345
346 }
347
348 int
349 rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
350 {
351         struct rte_eth_dev *dev;
352
353         /* This function is only safe when called from the primary process
354          * in a multi-process setup*/
355         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
356
357         if (port_id >= nb_ports) {
358                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
359                 return -EINVAL;
360         }
361
362         dev = &rte_eth_devices[port_id];
363         if (tx_queue_id >= dev->data->nb_tx_queues) {
364                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
365                 return -EINVAL;
366         }
367
368         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
369
370         return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
371
372 }
373
374 int
375 rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
376 {
377         struct rte_eth_dev *dev;
378
379         /* This function is only safe when called from the primary process
380          * in a multi-process setup*/
381         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
382
383         if (port_id >= nb_ports) {
384                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
385                 return -EINVAL;
386         }
387
388         dev = &rte_eth_devices[port_id];
389         if (tx_queue_id >= dev->data->nb_tx_queues) {
390                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
391                 return -EINVAL;
392         }
393
394         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
395
396         return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
397
398 }
399
/*
 * Size (or resize) the TX queue pointer array of *dev* to *nb_queues*
 * entries — TX counterpart of rte_eth_dev_rx_queue_config().  On first
 * configuration the array is zero-allocated; on re-configuration queues
 * beyond the new count are released through the PMD's tx_queue_release
 * op, the array is realloc'ed and newly added slots are zeroed.
 * Returns 0 on success, -ENOMEM on allocation failure or -ENOTSUP when
 * re-configuring and the PMD has no release op.
 *
 * NOTE(review): as in the RX variant, an rte_realloc() failure after the
 * shrink loop leaves nb_tx_queues stale and released queue pointers in
 * the old array — confirm callers abort the port on -ENOMEM.
 */
static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_tx_queues;
	void **txq;
	unsigned i;

	if (dev->data->tx_queues == NULL) { /* first time configuration */
		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
				sizeof(dev->data->tx_queues[0]) * nb_queues,
				CACHE_LINE_SIZE);
		if (dev->data->tx_queues == NULL) {
			dev->data->nb_tx_queues = 0;
			return -(ENOMEM);
		}
	} else { /* re-configure */
		FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

		txq = dev->data->tx_queues;

		/* release the queues dropped when shrinking */
		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->tx_queue_release)(txq[i]);
		txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
				CACHE_LINE_SIZE);
		if (txq == NULL)
			return -(ENOMEM);

		/* zero-fill the slots added when growing */
		if (nb_queues > old_nb_queues)
			memset(txq + old_nb_queues, 0,
				sizeof(txq[0]) * (nb_queues - old_nb_queues));

		dev->data->tx_queues = txq;

	}
	dev->data->nb_tx_queues = nb_queues;
	return (0);
}
437
/*
 * Validate the requested RX/TX multi-queue modes and queue counts of
 * *dev_conf* for port_id.  With SR-IOV active only VMDq-compatible modes
 * are accepted, and the default VMDQ_ONLY mode / single queue-per-pool
 * are forced directly into dev->data->dev_conf; without SR-IOV the
 * VMDq+DCB and plain DCB configurations are checked for the exact queue
 * counts, pool numbers and traffic-class counts they require.
 *
 * Returns 0 when the configuration is acceptable, -EINVAL otherwise.
 * Note: despite taking *dev_conf* as const, the function may modify
 * dev->data->dev_conf (the copy installed by rte_eth_dev_configure()).
 */
static int
rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];

	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
		/* check multi-queue mode */
		if ((dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ||
		    (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) ||
		    (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB_RSS) ||
		    (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB)) {
			/* SRIOV only works in VMDq enable mode */
			PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
					" SRIOV active, "
					"wrong VMDQ mq_mode rx %u tx %u\n",
					port_id,
					dev_conf->rxmode.mq_mode,
					dev_conf->txmode.mq_mode);
			return (-EINVAL);
		}

		switch (dev_conf->rxmode.mq_mode) {
		case ETH_MQ_RX_VMDQ_RSS:
		case ETH_MQ_RX_VMDQ_DCB:
		case ETH_MQ_RX_VMDQ_DCB_RSS:
			/* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
			PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
					" SRIOV active, "
					"unsupported VMDQ mq_mode rx %u\n",
					port_id, dev_conf->rxmode.mq_mode);
			return (-EINVAL);
		default: /* ETH_MQ_RX_VMDQ_ONLY or ETH_MQ_RX_NONE */
			/* if no mq mode was configured, use the default scheme */
			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
			if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
				RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
			break;
		}

		switch (dev_conf->txmode.mq_mode) {
		case ETH_MQ_TX_VMDQ_DCB:
			/* DCB VMDQ in SRIOV mode, not implemented yet */
			PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
					" SRIOV active, "
					"unsupported VMDQ mq_mode tx %u\n",
					port_id, dev_conf->txmode.mq_mode);
			return (-EINVAL);
		default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
			/* if no mq mode was configured, use the default scheme */
			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
			if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
				RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
			break;
		}

		/* check valid queue number */
		if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
		    (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
			PMD_DEBUG_TRACE("ethdev port_id=%d SRIOV active, "
				    "queue number must less equal to %d\n",
					port_id, RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
			return (-EINVAL);
		}
	} else {
		/* For VMDq+DCB mode check our configuration before we go further */
		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
			const struct rte_eth_vmdq_dcb_conf *conf;

			if (nb_rx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
				PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_rx_q "
						"!= %d\n",
						port_id, ETH_VMDQ_DCB_NUM_QUEUES);
				return (-EINVAL);
			}
			conf = &(dev_conf->rx_adv_conf.vmdq_dcb_conf);
			if (! (conf->nb_queue_pools == ETH_16_POOLS ||
			       conf->nb_queue_pools == ETH_32_POOLS)) {
				PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
						"nb_queue_pools must be %d or %d\n",
						port_id, ETH_16_POOLS, ETH_32_POOLS);
				return (-EINVAL);
			}
		}
		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
			const struct rte_eth_vmdq_dcb_tx_conf *conf;

			if (nb_tx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
				PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_tx_q "
						"!= %d\n",
						port_id, ETH_VMDQ_DCB_NUM_QUEUES);
				return (-EINVAL);
			}
			conf = &(dev_conf->tx_adv_conf.vmdq_dcb_tx_conf);
			if (! (conf->nb_queue_pools == ETH_16_POOLS ||
			       conf->nb_queue_pools == ETH_32_POOLS)) {
				PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
						"nb_queue_pools != %d or nb_queue_pools "
						"!= %d\n",
						port_id, ETH_16_POOLS, ETH_32_POOLS);
				return (-EINVAL);
			}
		}

		/* For DCB mode check our configuration before we go further */
		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
			const struct rte_eth_dcb_rx_conf *conf;

			if (nb_rx_q != ETH_DCB_NUM_QUEUES) {
				PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_rx_q "
						"!= %d\n",
						port_id, ETH_DCB_NUM_QUEUES);
				return (-EINVAL);
			}
			conf = &(dev_conf->rx_adv_conf.dcb_rx_conf);
			if (! (conf->nb_tcs == ETH_4_TCS ||
			       conf->nb_tcs == ETH_8_TCS)) {
				PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
						"nb_tcs != %d or nb_tcs "
						"!= %d\n",
						port_id, ETH_4_TCS, ETH_8_TCS);
				return (-EINVAL);
			}
		}

		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
			const struct rte_eth_dcb_tx_conf *conf;

			if (nb_tx_q != ETH_DCB_NUM_QUEUES) {
				PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_tx_q "
						"!= %d\n",
						port_id, ETH_DCB_NUM_QUEUES);
				return (-EINVAL);
			}
			conf = &(dev_conf->tx_adv_conf.dcb_tx_conf);
			if (! (conf->nb_tcs == ETH_4_TCS ||
			       conf->nb_tcs == ETH_8_TCS)) {
				PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
						"nb_tcs != %d or nb_tcs "
						"!= %d\n",
						port_id, ETH_4_TCS, ETH_8_TCS);
				return (-EINVAL);
			}
		}
	}
	return 0;
}
585
/*
 * Configure an Ethernet port: validate the requested RX/TX queue counts
 * against the PMD-reported limits, install *dev_conf* into the shared
 * port data, sanity-check the max RX packet length and multi-queue mode,
 * size the queue pointer arrays and finally invoke the PMD's
 * dev_configure op.  Primary process only; the port must be stopped.
 *
 * Returns 0 on success, or a negative errno (-EINVAL, -EBUSY, -ENOTSUP,
 * -ENOMEM, or the PMD's error).  On failure of a later stage, the queue
 * arrays set up by earlier stages are torn down again.
 */
int
rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	int diag;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup */
	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

	if (port_id >= nb_ports || port_id >= RTE_MAX_ETHPORTS) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-EINVAL);
	}
	dev = &rte_eth_devices[port_id];

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	if (dev->data->dev_started) {
		PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n", port_id);
		return (-EBUSY);
	}

	/*
	 * Check that the numbers of RX and TX queues are not greater
	 * than the maximum number of RX and TX queues supported by the
	 * configured device.
	 */
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);
	if (nb_rx_q > dev_info.max_rx_queues) {
		PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
				port_id, nb_rx_q, dev_info.max_rx_queues);
		return (-EINVAL);
	}
	if (nb_rx_q == 0) {
		PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_q == 0\n", port_id);
		return (-EINVAL);
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
				port_id, nb_tx_q, dev_info.max_tx_queues);
		return (-EINVAL);
	}
	if (nb_tx_q == 0) {
		PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_q == 0\n", port_id);
		return (-EINVAL);
	}

	/* Copy the dev_conf parameter into the dev structure */
	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

	/*
	 * If jumbo frames are enabled, check that the maximum RX packet
	 * length is supported by the configured device.
	 */
	if (dev_conf->rxmode.jumbo_frame == 1) {
		if (dev_conf->rxmode.max_rx_pkt_len >
		    dev_info.max_rx_pktlen) {
			PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
				" > max valid value %u\n",
				port_id,
				(unsigned)dev_conf->rxmode.max_rx_pkt_len,
				(unsigned)dev_info.max_rx_pktlen);
			return (-EINVAL);
		}
		else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
			PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
				" < min valid value %u\n",
				port_id,
				(unsigned)dev_conf->rxmode.max_rx_pkt_len,
				(unsigned)ETHER_MIN_LEN);
			return (-EINVAL);
		}
	} else {
		/* without jumbo frames an out-of-range length is silently
		 * replaced by the standard Ethernet maximum */
		if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
			dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
			/* Use default value */
			dev->data->dev_conf.rxmode.max_rx_pkt_len =
							ETHER_MAX_LEN;
	}

	/* multiple queue mode checking */
	diag = rte_eth_dev_check_mq_mode(port_id, nb_rx_q, nb_tx_q, dev_conf);
	if (diag != 0) {
		PMD_DEBUG_TRACE("port%d rte_eth_dev_check_mq_mode = %d\n",
				port_id, diag);
		return diag;
	}

	/*
	 * Setup new number of RX/TX queues and reconfigure device.
	 */
	diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
				port_id, diag);
		return diag;
	}

	diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
				port_id, diag);
		/* roll back the RX queue array set up above */
		rte_eth_dev_rx_queue_config(dev, 0);
		return diag;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
				port_id, diag);
		/* roll back both queue arrays set up above */
		rte_eth_dev_rx_queue_config(dev, 0);
		rte_eth_dev_tx_queue_config(dev, 0);
		return diag;
	}

	return 0;
}
709
/*
 * Replay the software-cached device configuration into the hardware:
 * MAC address table, promiscuous mode and all-multicast mode.
 * Intended to run right after a successful dev_start, so settings
 * applied while the port was stopped are re-programmed into the device.
 */
static void
rte_eth_dev_config_restore(uint8_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct ether_addr addr;
	uint16_t i;
	uint32_t pool = 0;

	dev = &rte_eth_devices[port_id];

	rte_eth_dev_info_get(port_id, &dev_info);

	/* in SR-IOV mode, MAC entries go into the default VMDq pool */
	if (RTE_ETH_DEV_SRIOV(dev).active)
		pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx;

	/* replay MAC address configuration */
	for (i = 0; i < dev_info.max_mac_addrs; i++) {
		addr = dev->data->mac_addrs[i];

		/* skip zero address (unused table slot) */
		if (is_zero_ether_addr(&addr))
			continue;

		/* add address to the hardware */
		if  (*dev->dev_ops->mac_addr_add)
			(*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool);
		else {
			PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
					port_id);
			/* exit the loop but not return an error */
			break;
		}
	}

	/* replay promiscuous configuration */
	if (rte_eth_promiscuous_get(port_id) == 1)
		rte_eth_promiscuous_enable(port_id);
	else if (rte_eth_promiscuous_get(port_id) == 0)
		rte_eth_promiscuous_disable(port_id);

	/* replay allmulticast configuration */
	if (rte_eth_allmulticast_get(port_id) == 1)
		rte_eth_allmulticast_enable(port_id);
	else if (rte_eth_allmulticast_get(port_id) == 0)
		rte_eth_allmulticast_disable(port_id);
}
757
758 int
759 rte_eth_dev_start(uint8_t port_id)
760 {
761         struct rte_eth_dev *dev;
762         int diag;
763
764         /* This function is only safe when called from the primary process
765          * in a multi-process setup*/
766         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
767
768         if (port_id >= nb_ports) {
769                 PMD_DEBUG_TRACE("Invalid port_id=%" PRIu8 "\n", port_id);
770                 return (-EINVAL);
771         }
772         dev = &rte_eth_devices[port_id];
773
774         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
775
776         if (dev->data->dev_started != 0) {
777                 PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
778                         " already started\n",
779                         port_id);
780                 return (0);
781         }
782
783         diag = (*dev->dev_ops->dev_start)(dev);
784         if (diag == 0)
785                 dev->data->dev_started = 1;
786         else
787                 return diag;
788
789         rte_eth_dev_config_restore(port_id);
790
791         return 0;
792 }
793
794 void
795 rte_eth_dev_stop(uint8_t port_id)
796 {
797         struct rte_eth_dev *dev;
798
799         /* This function is only safe when called from the primary process
800          * in a multi-process setup*/
801         PROC_PRIMARY_OR_RET();
802
803         if (port_id >= nb_ports) {
804                 PMD_DEBUG_TRACE("Invalid port_id=%" PRIu8 "\n", port_id);
805                 return;
806         }
807         dev = &rte_eth_devices[port_id];
808
809         FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
810
811         if (dev->data->dev_started == 0) {
812                 PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
813                         " already stopped\n",
814                         port_id);
815                 return;
816         }
817
818         dev->data->dev_started = 0;
819         (*dev->dev_ops->dev_stop)(dev);
820 }
821
822 int
823 rte_eth_dev_set_link_up(uint8_t port_id)
824 {
825         struct rte_eth_dev *dev;
826
827         /* This function is only safe when called from the primary process
828          * in a multi-process setup*/
829         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
830
831         if (port_id >= nb_ports) {
832                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
833                 return -EINVAL;
834         }
835         dev = &rte_eth_devices[port_id];
836
837         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
838         return (*dev->dev_ops->dev_set_link_up)(dev);
839 }
840
841 int
842 rte_eth_dev_set_link_down(uint8_t port_id)
843 {
844         struct rte_eth_dev *dev;
845
846         /* This function is only safe when called from the primary process
847          * in a multi-process setup*/
848         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
849
850         if (port_id >= nb_ports) {
851                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
852                 return -EINVAL;
853         }
854         dev = &rte_eth_devices[port_id];
855
856         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
857         return (*dev->dev_ops->dev_set_link_down)(dev);
858 }
859
860 void
861 rte_eth_dev_close(uint8_t port_id)
862 {
863         struct rte_eth_dev *dev;
864
865         /* This function is only safe when called from the primary process
866          * in a multi-process setup*/
867         PROC_PRIMARY_OR_RET();
868
869         if (port_id >= nb_ports) {
870                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
871                 return;
872         }
873
874         dev = &rte_eth_devices[port_id];
875
876         FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
877         dev->data->dev_started = 0;
878         (*dev->dev_ops->dev_close)(dev);
879 }
880
881 int
882 rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
883                        uint16_t nb_rx_desc, unsigned int socket_id,
884                        const struct rte_eth_rxconf *rx_conf,
885                        struct rte_mempool *mp)
886 {
887         int ret;
888         uint32_t mbp_buf_size;
889         struct rte_eth_dev *dev;
890         struct rte_pktmbuf_pool_private *mbp_priv;
891         struct rte_eth_dev_info dev_info;
892
893         /* This function is only safe when called from the primary process
894          * in a multi-process setup*/
895         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
896
897         if (port_id >= nb_ports) {
898                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
899                 return (-EINVAL);
900         }
901         dev = &rte_eth_devices[port_id];
902         if (rx_queue_id >= dev->data->nb_rx_queues) {
903                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
904                 return (-EINVAL);
905         }
906
907         if (dev->data->dev_started) {
908                 PMD_DEBUG_TRACE(
909                     "port %d must be stopped to allow configuration\n", port_id);
910                 return -EBUSY;
911         }
912
913         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
914         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
915
916         /*
917          * Check the size of the mbuf data buffer.
918          * This value must be provided in the private data of the memory pool.
919          * First check that the memory pool has a valid private data.
920          */
921         (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
922         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
923                 PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
924                                 mp->name, (int) mp->private_data_size,
925                                 (int) sizeof(struct rte_pktmbuf_pool_private));
926                 return (-ENOSPC);
927         }
928         mbp_priv = rte_mempool_get_priv(mp);
929         mbp_buf_size = mbp_priv->mbuf_data_room_size;
930
931         if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
932                 PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
933                                 "(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
934                                 "=%d)\n",
935                                 mp->name,
936                                 (int)mbp_buf_size,
937                                 (int)(RTE_PKTMBUF_HEADROOM +
938                                       dev_info.min_rx_bufsize),
939                                 (int)RTE_PKTMBUF_HEADROOM,
940                                 (int)dev_info.min_rx_bufsize);
941                 return (-EINVAL);
942         }
943
944         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
945                                               socket_id, rx_conf, mp);
946         if (!ret) {
947                 if (!dev->data->min_rx_buf_size ||
948                     dev->data->min_rx_buf_size > mbp_buf_size)
949                         dev->data->min_rx_buf_size = mbp_buf_size;
950         }
951
952         return ret;
953 }
954
955 int
956 rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
957                        uint16_t nb_tx_desc, unsigned int socket_id,
958                        const struct rte_eth_txconf *tx_conf)
959 {
960         struct rte_eth_dev *dev;
961
962         /* This function is only safe when called from the primary process
963          * in a multi-process setup*/
964         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
965
966         if (port_id >= RTE_MAX_ETHPORTS || port_id >= nb_ports) {
967                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
968                 return (-EINVAL);
969         }
970         dev = &rte_eth_devices[port_id];
971         if (tx_queue_id >= dev->data->nb_tx_queues) {
972                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
973                 return (-EINVAL);
974         }
975
976         if (dev->data->dev_started) {
977                 PMD_DEBUG_TRACE(
978                     "port %d must be stopped to allow configuration\n", port_id);
979                 return -EBUSY;
980         }
981
982         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
983         return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
984                                                socket_id, tx_conf);
985 }
986
987 void
988 rte_eth_promiscuous_enable(uint8_t port_id)
989 {
990         struct rte_eth_dev *dev;
991
992         if (port_id >= nb_ports) {
993                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
994                 return;
995         }
996         dev = &rte_eth_devices[port_id];
997
998         FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
999         (*dev->dev_ops->promiscuous_enable)(dev);
1000         dev->data->promiscuous = 1;
1001 }
1002
1003 void
1004 rte_eth_promiscuous_disable(uint8_t port_id)
1005 {
1006         struct rte_eth_dev *dev;
1007
1008         if (port_id >= nb_ports) {
1009                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1010                 return;
1011         }
1012         dev = &rte_eth_devices[port_id];
1013
1014         FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
1015         dev->data->promiscuous = 0;
1016         (*dev->dev_ops->promiscuous_disable)(dev);
1017 }
1018
1019 int
1020 rte_eth_promiscuous_get(uint8_t port_id)
1021 {
1022         struct rte_eth_dev *dev;
1023
1024         if (port_id >= nb_ports) {
1025                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1026                 return -1;
1027         }
1028
1029         dev = &rte_eth_devices[port_id];
1030         return dev->data->promiscuous;
1031 }
1032
1033 void
1034 rte_eth_allmulticast_enable(uint8_t port_id)
1035 {
1036         struct rte_eth_dev *dev;
1037
1038         if (port_id >= nb_ports) {
1039                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1040                 return;
1041         }
1042         dev = &rte_eth_devices[port_id];
1043
1044         FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
1045         (*dev->dev_ops->allmulticast_enable)(dev);
1046         dev->data->all_multicast = 1;
1047 }
1048
1049 void
1050 rte_eth_allmulticast_disable(uint8_t port_id)
1051 {
1052         struct rte_eth_dev *dev;
1053
1054         if (port_id >= nb_ports) {
1055                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1056                 return;
1057         }
1058         dev = &rte_eth_devices[port_id];
1059
1060         FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
1061         dev->data->all_multicast = 0;
1062         (*dev->dev_ops->allmulticast_disable)(dev);
1063 }
1064
1065 int
1066 rte_eth_allmulticast_get(uint8_t port_id)
1067 {
1068         struct rte_eth_dev *dev;
1069
1070         if (port_id >= nb_ports) {
1071                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1072                 return -1;
1073         }
1074
1075         dev = &rte_eth_devices[port_id];
1076         return dev->data->all_multicast;
1077 }
1078
/*
 * Atomically snapshot the device's cached link status into *link.
 *
 * The whole link structure is copied with a single 64-bit
 * compare-and-set: cmpset(dst, *dst, *src) succeeds only if *dst was
 * not modified between the read of its expected value and the swap.
 * NOTE(review): relies on struct rte_eth_link fitting in 64 bits —
 * the casts below are only sound under that assumption.
 *
 * @return 0 on success, -1 if the CAS lost a race with a concurrent
 *         writer (caller may retry or accept the stale value).
 */
static inline int
rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
1092
1093 void
1094 rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
1095 {
1096         struct rte_eth_dev *dev;
1097
1098         if (port_id >= nb_ports) {
1099                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1100                 return;
1101         }
1102         dev = &rte_eth_devices[port_id];
1103         FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1104
1105         if (dev->data->dev_conf.intr_conf.lsc != 0)
1106                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1107         else {
1108                 (*dev->dev_ops->link_update)(dev, 1);
1109                 *eth_link = dev->data->dev_link;
1110         }
1111 }
1112
1113 void
1114 rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
1115 {
1116         struct rte_eth_dev *dev;
1117
1118         if (port_id >= nb_ports) {
1119                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1120                 return;
1121         }
1122         dev = &rte_eth_devices[port_id];
1123         FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1124
1125         if (dev->data->dev_conf.intr_conf.lsc != 0)
1126                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1127         else {
1128                 (*dev->dev_ops->link_update)(dev, 0);
1129                 *eth_link = dev->data->dev_link;
1130         }
1131 }
1132
1133 void
1134 rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
1135 {
1136         struct rte_eth_dev *dev;
1137
1138         if (port_id >= nb_ports) {
1139                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1140                 return;
1141         }
1142         dev = &rte_eth_devices[port_id];
1143         memset(stats, 0, sizeof(*stats));
1144
1145         FUNC_PTR_OR_RET(*dev->dev_ops->stats_get);
1146         (*dev->dev_ops->stats_get)(dev, stats);
1147         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1148 }
1149
1150 void
1151 rte_eth_stats_reset(uint8_t port_id)
1152 {
1153         struct rte_eth_dev *dev;
1154
1155         if (port_id >= nb_ports) {
1156                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1157                 return;
1158         }
1159         dev = &rte_eth_devices[port_id];
1160
1161         FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
1162         (*dev->dev_ops->stats_reset)(dev);
1163 }
1164
1165
1166 static int
1167 set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
1168                 uint8_t is_rx)
1169 {
1170         struct rte_eth_dev *dev;
1171
1172         if (port_id >= nb_ports) {
1173                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1174                 return -ENODEV;
1175         }
1176         dev = &rte_eth_devices[port_id];
1177
1178         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
1179         return (*dev->dev_ops->queue_stats_mapping_set)
1180                         (dev, queue_id, stat_idx, is_rx);
1181 }
1182
1183
1184 int
1185 rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
1186                 uint8_t stat_idx)
1187 {
1188         return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
1189                         STAT_QMAP_TX);
1190 }
1191
1192
1193 int
1194 rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
1195                 uint8_t stat_idx)
1196 {
1197         return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
1198                         STAT_QMAP_RX);
1199 }
1200
1201
1202 void
1203 rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
1204 {
1205         struct rte_eth_dev *dev;
1206
1207         if (port_id >= nb_ports) {
1208                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1209                 return;
1210         }
1211         dev = &rte_eth_devices[port_id];
1212
1213         /* Default device offload capabilities to zero */
1214         dev_info->rx_offload_capa = 0;
1215         dev_info->tx_offload_capa = 0;
1216         dev_info->if_index = 0;
1217         FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
1218         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
1219         dev_info->pci_dev = dev->pci_dev;
1220         if (dev->driver)
1221                 dev_info->driver_name = dev->driver->pci_drv.name;
1222 }
1223
1224 void
1225 rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
1226 {
1227         struct rte_eth_dev *dev;
1228
1229         if (port_id >= nb_ports) {
1230                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1231                 return;
1232         }
1233         dev = &rte_eth_devices[port_id];
1234         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
1235 }
1236
1237
1238 int
1239 rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu)
1240 {
1241         struct rte_eth_dev *dev;
1242
1243         if (port_id >= nb_ports) {
1244                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1245                 return (-ENODEV);
1246         }
1247
1248         dev = &rte_eth_devices[port_id];
1249         *mtu = dev->data->mtu;
1250         return 0;
1251 }
1252
1253 int
1254 rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu)
1255 {
1256         int ret;
1257         struct rte_eth_dev *dev;
1258
1259         if (port_id >= nb_ports) {
1260                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1261                 return (-ENODEV);
1262         }
1263
1264         dev = &rte_eth_devices[port_id];
1265         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
1266
1267         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
1268         if (!ret)
1269                 dev->data->mtu = mtu;
1270
1271         return ret;
1272 }
1273
1274 int
1275 rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
1276 {
1277         struct rte_eth_dev *dev;
1278
1279         if (port_id >= nb_ports) {
1280                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1281                 return (-ENODEV);
1282         }
1283         dev = &rte_eth_devices[port_id];
1284         if (! (dev->data->dev_conf.rxmode.hw_vlan_filter)) {
1285                 PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
1286                 return (-ENOSYS);
1287         }
1288
1289         if (vlan_id > 4095) {
1290                 PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
1291                                 port_id, (unsigned) vlan_id);
1292                 return (-EINVAL);
1293         }
1294         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
1295         (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
1296         return (0);
1297 }
1298
1299 int
1300 rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on)
1301 {
1302         struct rte_eth_dev *dev;
1303
1304         if (port_id >= nb_ports) {
1305                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1306                 return (-ENODEV);
1307         }
1308
1309         dev = &rte_eth_devices[port_id];
1310         if (rx_queue_id >= dev->data->nb_rx_queues) {
1311                 PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", port_id);
1312                 return (-EINVAL);
1313         }
1314
1315         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
1316         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
1317
1318         return (0);
1319 }
1320
1321 int
1322 rte_eth_dev_set_vlan_ether_type(uint8_t port_id, uint16_t tpid)
1323 {
1324         struct rte_eth_dev *dev;
1325
1326         if (port_id >= nb_ports) {
1327                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1328                 return (-ENODEV);
1329         }
1330
1331         dev = &rte_eth_devices[port_id];
1332         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
1333         (*dev->dev_ops->vlan_tpid_set)(dev, tpid);
1334
1335         return (0);
1336 }
1337
1338 int
1339 rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
1340 {
1341         struct rte_eth_dev *dev;
1342         int ret = 0;
1343         int mask = 0;
1344         int cur, org = 0;
1345
1346         if (port_id >= nb_ports) {
1347                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1348                 return (-ENODEV);
1349         }
1350
1351         dev = &rte_eth_devices[port_id];
1352
1353         /*check which option changed by application*/
1354         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
1355         org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
1356         if (cur != org){
1357                 dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
1358                 mask |= ETH_VLAN_STRIP_MASK;
1359         }
1360
1361         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
1362         org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
1363         if (cur != org){
1364                 dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
1365                 mask |= ETH_VLAN_FILTER_MASK;
1366         }
1367
1368         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
1369         org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
1370         if (cur != org){
1371                 dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
1372                 mask |= ETH_VLAN_EXTEND_MASK;
1373         }
1374
1375         /*no change*/
1376         if(mask == 0)
1377                 return ret;
1378
1379         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
1380         (*dev->dev_ops->vlan_offload_set)(dev, mask);
1381
1382         return ret;
1383 }
1384
1385 int
1386 rte_eth_dev_get_vlan_offload(uint8_t port_id)
1387 {
1388         struct rte_eth_dev *dev;
1389         int ret = 0;
1390
1391         if (port_id >= nb_ports) {
1392                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1393                 return (-ENODEV);
1394         }
1395
1396         dev = &rte_eth_devices[port_id];
1397
1398         if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1399                 ret |= ETH_VLAN_STRIP_OFFLOAD ;
1400
1401         if (dev->data->dev_conf.rxmode.hw_vlan_filter)
1402                 ret |= ETH_VLAN_FILTER_OFFLOAD ;
1403
1404         if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1405                 ret |= ETH_VLAN_EXTEND_OFFLOAD ;
1406
1407         return ret;
1408 }
1409
1410 int
1411 rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on)
1412 {
1413         struct rte_eth_dev *dev;
1414
1415         if (port_id >= nb_ports) {
1416                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1417                 return (-ENODEV);
1418         }
1419         dev = &rte_eth_devices[port_id];
1420         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
1421         (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on);
1422
1423         return 0;
1424 }
1425
1426 int
1427 rte_eth_dev_fdir_add_signature_filter(uint8_t port_id,
1428                                       struct rte_fdir_filter *fdir_filter,
1429                                       uint8_t queue)
1430 {
1431         struct rte_eth_dev *dev;
1432
1433         if (port_id >= nb_ports) {
1434                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1435                 return (-ENODEV);
1436         }
1437
1438         dev = &rte_eth_devices[port_id];
1439
1440         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
1441                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1442                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1443                 return (-ENOSYS);
1444         }
1445
1446         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1447              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1448             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1449                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1450                                 "None l4type, source & destinations ports " \
1451                                 "should be null!\n");
1452                 return (-EINVAL);
1453         }
1454
1455         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_signature_filter, -ENOTSUP);
1456         return (*dev->dev_ops->fdir_add_signature_filter)(dev, fdir_filter,
1457                                                                 queue);
1458 }
1459
1460 int
1461 rte_eth_dev_fdir_update_signature_filter(uint8_t port_id,
1462                                          struct rte_fdir_filter *fdir_filter,
1463                                          uint8_t queue)
1464 {
1465         struct rte_eth_dev *dev;
1466
1467         if (port_id >= nb_ports) {
1468                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1469                 return (-ENODEV);
1470         }
1471
1472         dev = &rte_eth_devices[port_id];
1473
1474         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
1475                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1476                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1477                 return (-ENOSYS);
1478         }
1479
1480         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1481              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1482             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1483                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1484                                 "None l4type, source & destinations ports " \
1485                                 "should be null!\n");
1486                 return (-EINVAL);
1487         }
1488
1489         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_signature_filter, -ENOTSUP);
1490         return (*dev->dev_ops->fdir_update_signature_filter)(dev, fdir_filter,
1491                                                                 queue);
1492
1493 }
1494
1495 int
1496 rte_eth_dev_fdir_remove_signature_filter(uint8_t port_id,
1497                                          struct rte_fdir_filter *fdir_filter)
1498 {
1499         struct rte_eth_dev *dev;
1500
1501         if (port_id >= nb_ports) {
1502                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1503                 return (-ENODEV);
1504         }
1505
1506         dev = &rte_eth_devices[port_id];
1507
1508         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
1509                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1510                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1511                 return (-ENOSYS);
1512         }
1513
1514         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1515              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1516             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1517                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1518                                 "None l4type source & destinations ports " \
1519                                 "should be null!\n");
1520                 return (-EINVAL);
1521         }
1522
1523         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_signature_filter, -ENOTSUP);
1524         return (*dev->dev_ops->fdir_remove_signature_filter)(dev, fdir_filter);
1525 }
1526
1527 int
1528 rte_eth_dev_fdir_get_infos(uint8_t port_id, struct rte_eth_fdir *fdir)
1529 {
1530         struct rte_eth_dev *dev;
1531
1532         if (port_id >= nb_ports) {
1533                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1534                 return (-ENODEV);
1535         }
1536
1537         dev = &rte_eth_devices[port_id];
1538         if (! (dev->data->dev_conf.fdir_conf.mode)) {
1539                 PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
1540                 return (-ENOSYS);
1541         }
1542
1543         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_infos_get, -ENOTSUP);
1544
1545         (*dev->dev_ops->fdir_infos_get)(dev, fdir);
1546         return (0);
1547 }
1548
1549 int
1550 rte_eth_dev_fdir_add_perfect_filter(uint8_t port_id,
1551                                     struct rte_fdir_filter *fdir_filter,
1552                                     uint16_t soft_id, uint8_t queue,
1553                                     uint8_t drop)
1554 {
1555         struct rte_eth_dev *dev;
1556
1557         if (port_id >= nb_ports) {
1558                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1559                 return (-ENODEV);
1560         }
1561
1562         dev = &rte_eth_devices[port_id];
1563
1564         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
1565                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1566                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1567                 return (-ENOSYS);
1568         }
1569
1570         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1571              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1572             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1573                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1574                                 "None l4type, source & destinations ports " \
1575                                 "should be null!\n");
1576                 return (-EINVAL);
1577         }
1578
1579         /* For now IPv6 is not supported with perfect filter */
1580         if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
1581                 return (-ENOTSUP);
1582
1583         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_perfect_filter, -ENOTSUP);
1584         return (*dev->dev_ops->fdir_add_perfect_filter)(dev, fdir_filter,
1585                                                                 soft_id, queue,
1586                                                                 drop);
1587 }
1588
1589 int
1590 rte_eth_dev_fdir_update_perfect_filter(uint8_t port_id,
1591                                        struct rte_fdir_filter *fdir_filter,
1592                                        uint16_t soft_id, uint8_t queue,
1593                                        uint8_t drop)
1594 {
1595         struct rte_eth_dev *dev;
1596
1597         if (port_id >= nb_ports) {
1598                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1599                 return (-ENODEV);
1600         }
1601
1602         dev = &rte_eth_devices[port_id];
1603
1604         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
1605                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1606                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1607                 return (-ENOSYS);
1608         }
1609
1610         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1611              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1612             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1613                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1614                                 "None l4type, source & destinations ports " \
1615                                 "should be null!\n");
1616                 return (-EINVAL);
1617         }
1618
1619         /* For now IPv6 is not supported with perfect filter */
1620         if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
1621                 return (-ENOTSUP);
1622
1623         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_perfect_filter, -ENOTSUP);
1624         return (*dev->dev_ops->fdir_update_perfect_filter)(dev, fdir_filter,
1625                                                         soft_id, queue, drop);
1626 }
1627
/*
 * Remove the perfect-match flow director filter identified by soft_id
 * from the given port. Returns 0 on success or a negative errno value.
 */
int
rte_eth_dev_fdir_remove_perfect_filter(uint8_t port_id,
                                       struct rte_fdir_filter *fdir_filter,
                                       uint16_t soft_id)
{
        struct rte_eth_dev *dev;

        /* The port must refer to a probed device. */
        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-ENODEV);
        }

        dev = &rte_eth_devices[port_id];

        /* Perfect filters only exist when the port runs in perfect FDIR mode. */
        if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
                PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
                                port_id, dev->data->dev_conf.fdir_conf.mode);
                return (-ENOSYS);
        }

        /* SCTP and "none" L4 types carry no L4 port numbers, so the filter
         * must not specify any. */
        if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
             || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
            && (fdir_filter->port_src || fdir_filter->port_dst)) {
                PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
                                "None l4type, source & destinations ports " \
                                "should be null!\n");
                return (-EINVAL);
        }

        /* For now IPv6 is not supported with perfect filter */
        if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
                return (-ENOTSUP);

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_perfect_filter, -ENOTSUP);
        return (*dev->dev_ops->fdir_remove_perfect_filter)(dev, fdir_filter,
                                                                soft_id);
}
1665
1666 int
1667 rte_eth_dev_fdir_set_masks(uint8_t port_id, struct rte_fdir_masks *fdir_mask)
1668 {
1669         struct rte_eth_dev *dev;
1670
1671         if (port_id >= nb_ports) {
1672                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1673                 return (-ENODEV);
1674         }
1675
1676         dev = &rte_eth_devices[port_id];
1677         if (! (dev->data->dev_conf.fdir_conf.mode)) {
1678                 PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
1679                 return (-ENOSYS);
1680         }
1681
1682         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_set_masks, -ENOTSUP);
1683         return (*dev->dev_ops->fdir_set_masks)(dev, fdir_mask);
1684 }
1685
1686 int
1687 rte_eth_dev_flow_ctrl_get(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
1688 {
1689         struct rte_eth_dev *dev;
1690
1691         if (port_id >= nb_ports) {
1692                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1693                 return (-ENODEV);
1694         }
1695
1696         dev = &rte_eth_devices[port_id];
1697         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
1698         memset(fc_conf, 0, sizeof(*fc_conf));
1699         return (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf);
1700 }
1701
1702 int
1703 rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
1704 {
1705         struct rte_eth_dev *dev;
1706
1707         if (port_id >= nb_ports) {
1708                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1709                 return (-ENODEV);
1710         }
1711
1712         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
1713                 PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
1714                 return (-EINVAL);
1715         }
1716
1717         dev = &rte_eth_devices[port_id];
1718         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
1719         return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
1720 }
1721
1722 int
1723 rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc_conf)
1724 {
1725         struct rte_eth_dev *dev;
1726
1727         if (port_id >= nb_ports) {
1728                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1729                 return (-ENODEV);
1730         }
1731
1732         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
1733                 PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
1734                 return (-EINVAL);
1735         }
1736
1737         dev = &rte_eth_devices[port_id];
1738         /* High water, low water validation are device specific */
1739         if  (*dev->dev_ops->priority_flow_ctrl_set)
1740                 return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
1741         return (-ENOTSUP);
1742 }
1743
/*
 * Update entries of the RSS redirection table (RETA) of a port.
 * mask_lo/mask_hi select which of the 128 entries to update (bit i of
 * mask_lo covers entry i, bit i of mask_hi covers entry i + 64); every
 * selected entry must redirect to a configured RX queue.
 * Returns 0 on success or a negative errno value.
 */
int
rte_eth_dev_rss_reta_update(uint8_t port_id, struct rte_eth_rss_reta *reta_conf)
{
        struct rte_eth_dev *dev;
        uint16_t max_rxq;
        uint8_t i,j;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-ENODEV);
        }

        /* Invalid mask bit(s) setting */
        if ((reta_conf->mask_lo == 0) && (reta_conf->mask_hi == 0)) {
                PMD_DEBUG_TRACE("Invalid update mask bits for port=%d\n",port_id);
                return (-EINVAL);
        }

        dev = &rte_eth_devices[port_id];
        /* Highest queue id a RETA entry may reference: the number of
         * configured RX queues, capped at ETH_RSS_RETA_MAX_QUEUE. */
        max_rxq = (dev->data->nb_rx_queues <= ETH_RSS_RETA_MAX_QUEUE) ?
                dev->data->nb_rx_queues : ETH_RSS_RETA_MAX_QUEUE;
        /* Validate the lower half of the table (entries 0..63). */
        if (reta_conf->mask_lo != 0) {
                for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES/2; i++) {
                        if ((reta_conf->mask_lo & (1ULL << i)) &&
                                (reta_conf->reta[i] >= max_rxq)) {
                                PMD_DEBUG_TRACE("RETA hash index output"
                                        "configration for port=%d,invalid"
                                        "queue=%d\n",port_id,reta_conf->reta[i]);

                                return (-EINVAL);
                        }
                }
        }

        /* Validate the upper half of the table (entries 64..127). */
        if (reta_conf->mask_hi != 0) {
                for (i = 0; i< ETH_RSS_RETA_NUM_ENTRIES/2; i++) {
                        j = (uint8_t)(i + ETH_RSS_RETA_NUM_ENTRIES/2);

                        /* Check if the max entry >= 128 */
                        if ((reta_conf->mask_hi & (1ULL << i)) &&
                                (reta_conf->reta[j] >= max_rxq)) {
                                PMD_DEBUG_TRACE("RETA hash index output"
                                        "configration for port=%d,invalid"
                                        "queue=%d\n",port_id,reta_conf->reta[j]);

                                return (-EINVAL);
                        }
                }
        }

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
        return (*dev->dev_ops->reta_update)(dev, reta_conf);
}
1797
1798 int
1799 rte_eth_dev_rss_reta_query(uint8_t port_id, struct rte_eth_rss_reta *reta_conf)
1800 {
1801         struct rte_eth_dev *dev;
1802
1803         if (port_id >= nb_ports) {
1804                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1805                 return (-ENODEV);
1806         }
1807
1808         if((reta_conf->mask_lo == 0) && (reta_conf->mask_hi == 0)) {
1809                 PMD_DEBUG_TRACE("Invalid update mask bits for the port=%d\n",port_id);
1810                 return (-EINVAL);
1811         }
1812
1813         dev = &rte_eth_devices[port_id];
1814         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
1815         return (*dev->dev_ops->reta_query)(dev, reta_conf);
1816 }
1817
1818 int
1819 rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf)
1820 {
1821         struct rte_eth_dev *dev;
1822         uint16_t rss_hash_protos;
1823
1824         if (port_id >= nb_ports) {
1825                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1826                 return (-ENODEV);
1827         }
1828         rss_hash_protos = rss_conf->rss_hf;
1829         if ((rss_hash_protos != 0) &&
1830             ((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) {
1831                 PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%x\n",
1832                                 rss_hash_protos);
1833                 return (-EINVAL);
1834         }
1835         dev = &rte_eth_devices[port_id];
1836         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
1837         return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
1838 }
1839
1840 int
1841 rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
1842                               struct rte_eth_rss_conf *rss_conf)
1843 {
1844         struct rte_eth_dev *dev;
1845
1846         if (port_id >= nb_ports) {
1847                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1848                 return (-ENODEV);
1849         }
1850         dev = &rte_eth_devices[port_id];
1851         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
1852         return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
1853 }
1854
1855 int
1856 rte_eth_led_on(uint8_t port_id)
1857 {
1858         struct rte_eth_dev *dev;
1859
1860         if (port_id >= nb_ports) {
1861                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1862                 return (-ENODEV);
1863         }
1864
1865         dev = &rte_eth_devices[port_id];
1866         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
1867         return ((*dev->dev_ops->dev_led_on)(dev));
1868 }
1869
1870 int
1871 rte_eth_led_off(uint8_t port_id)
1872 {
1873         struct rte_eth_dev *dev;
1874
1875         if (port_id >= nb_ports) {
1876                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1877                 return (-ENODEV);
1878         }
1879
1880         dev = &rte_eth_devices[port_id];
1881         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
1882         return ((*dev->dev_ops->dev_led_off)(dev));
1883 }
1884
1885 /*
1886  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
1887  * an empty spot.
1888  */
1889 static inline int
1890 get_mac_addr_index(uint8_t port_id, struct ether_addr *addr)
1891 {
1892         struct rte_eth_dev_info dev_info;
1893         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1894         unsigned i;
1895
1896         rte_eth_dev_info_get(port_id, &dev_info);
1897
1898         for (i = 0; i < dev_info.max_mac_addrs; i++)
1899                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
1900                         return i;
1901
1902         return -1;
1903 }
1904
/* All-zero MAC address; marks free slots in the MAC address tables. */
static struct ether_addr null_mac_addr = {{0, 0, 0, 0, 0, 0}};
1906
/*
 * Add MAC address addr to a port and associate it with VMDq pool 'pool'.
 * Re-adding an existing address only extends its pool bitmap; adding it
 * again for a pool it already serves is a no-op.
 * Returns 0 on success or a negative errno value.
 */
int
rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
                        uint32_t pool)
{
        struct rte_eth_dev *dev;
        int index;
        uint64_t pool_mask;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-ENODEV);
        }
        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);

        /* The all-zero address marks free slots, so it cannot be added. */
        if (is_zero_ether_addr(addr)) {
                PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
                        port_id);
                return (-EINVAL);
        }
        if (pool >= ETH_64_POOLS) {
                PMD_DEBUG_TRACE("pool id must be 0-%d\n",ETH_64_POOLS - 1);
                return (-EINVAL);
        }

        /* Reuse the slot of an existing entry, otherwise claim a free one. */
        index = get_mac_addr_index(port_id, addr);
        if (index < 0) {
                index = get_mac_addr_index(port_id, &null_mac_addr);
                if (index < 0) {
                        PMD_DEBUG_TRACE("port %d: MAC address array full\n",
                                port_id);
                        return (-ENOSPC);
                }
        } else {
                pool_mask = dev->data->mac_pool_sel[index];

                /* If both MAC address and pool are already there, do nothing. */
                if (pool_mask & (1ULL << pool))
                        return 0;
        }

        /* Update NIC */
        (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);

        /* Update address in NIC data structure */
        ether_addr_copy(addr, &dev->data->mac_addrs[index]);

        /* Update pool bitmap in NIC data structure */
        dev->data->mac_pool_sel[index] |= (1ULL << pool);

        return 0;
}
1959
/*
 * Remove MAC address addr from a port. The default address (slot 0)
 * cannot be removed; removing an absent address is a silent no-op.
 * Returns 0 on success or a negative errno value.
 */
int
rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
{
        struct rte_eth_dev *dev;
        int index;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-ENODEV);
        }
        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);

        /* Slot 0 always holds the device's default MAC address. */
        index = get_mac_addr_index(port_id, addr);
        if (index == 0) {
                PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
                return (-EADDRINUSE);
        } else if (index < 0)
                return 0;  /* Do nothing if address wasn't found */

        /* Update NIC */
        (*dev->dev_ops->mac_addr_remove)(dev, index);

        /* Update address in NIC data structure: mark the slot free again. */
        ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);

        return 0;
}
1988
1989 int
1990 rte_eth_dev_set_vf_rxmode(uint8_t port_id,  uint16_t vf,
1991                                 uint16_t rx_mode, uint8_t on)
1992 {
1993         uint16_t num_vfs;
1994         struct rte_eth_dev *dev;
1995         struct rte_eth_dev_info dev_info;
1996
1997         if (port_id >= nb_ports) {
1998                 PMD_DEBUG_TRACE("set VF RX mode:Invalid port_id=%d\n",
1999                                 port_id);
2000                 return (-ENODEV);
2001         }
2002
2003         dev = &rte_eth_devices[port_id];
2004         rte_eth_dev_info_get(port_id, &dev_info);
2005
2006         num_vfs = dev_info.max_vfs;
2007         if (vf > num_vfs)
2008         {
2009                 PMD_DEBUG_TRACE("set VF RX mode:invalid VF id %d\n", vf);
2010                 return (-EINVAL);
2011         }
2012         if (rx_mode == 0)
2013         {
2014                 PMD_DEBUG_TRACE("set VF RX mode:mode mask ca not be zero\n");
2015                 return (-EINVAL);
2016         }
2017         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP);
2018         return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on);
2019 }
2020
2021 /*
2022  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2023  * an empty spot.
2024  */
2025 static inline int
2026 get_hash_mac_addr_index(uint8_t port_id, struct ether_addr *addr)
2027 {
2028         struct rte_eth_dev_info dev_info;
2029         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2030         unsigned i;
2031
2032         rte_eth_dev_info_get(port_id, &dev_info);
2033         if (!dev->data->hash_mac_addrs)
2034                 return -1;
2035
2036         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
2037                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
2038                         ETHER_ADDR_LEN) == 0)
2039                         return i;
2040
2041         return -1;
2042 }
2043
/*
 * Add (on != 0) or remove (on == 0) MAC address addr in the unicast hash
 * table (UTA) of a port. The shadow table in dev->data is only updated
 * after the PMD reports success.
 * Returns 0 on success or a negative errno value.
 */
int
rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
                                uint8_t on)
{
        int index;
        int ret;
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("unicast hash setting:Invalid port_id=%d\n",
                        port_id);
                return (-ENODEV);
        }

        dev = &rte_eth_devices[port_id];
        /* The all-zero address marks free slots, so it cannot be managed. */
        if (is_zero_ether_addr(addr)) {
                PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
                        port_id);
                return (-EINVAL);
        }

        index = get_hash_mac_addr_index(port_id, addr);
        /* Check if it's already there, and do nothing */
        if ((index >= 0) && (on))
                return 0;

        if (index < 0) {
                /* Removing an address that was never set is a caller error. */
                if (!on) {
                        PMD_DEBUG_TRACE("port %d: the MAC address was not"
                                "set in UTA\n", port_id);
                        return (-EINVAL);
                }

                /* Adding: claim a free slot in the shadow table. */
                index = get_hash_mac_addr_index(port_id, &null_mac_addr);
                if (index < 0) {
                        PMD_DEBUG_TRACE("port %d: MAC address array full\n",
                                        port_id);
                        return (-ENOSPC);
                }
        }

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
        ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
        if (ret == 0) {
                /* Update address in NIC data structure */
                if (on)
                        ether_addr_copy(addr,
                                        &dev->data->hash_mac_addrs[index]);
                else
                        ether_addr_copy(&null_mac_addr,
                                        &dev->data->hash_mac_addrs[index]);
        }

        return ret;
}
2099
2100 int
2101 rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
2102 {
2103         struct rte_eth_dev *dev;
2104
2105         if (port_id >= nb_ports) {
2106                 PMD_DEBUG_TRACE("unicast hash setting:Invalid port_id=%d\n",
2107                         port_id);
2108                 return (-ENODEV);
2109         }
2110
2111         dev = &rte_eth_devices[port_id];
2112
2113         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
2114         return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
2115 }
2116
2117 int
2118 rte_eth_dev_set_vf_rx(uint8_t port_id,uint16_t vf, uint8_t on)
2119 {
2120         uint16_t num_vfs;
2121         struct rte_eth_dev *dev;
2122         struct rte_eth_dev_info dev_info;
2123
2124         if (port_id >= nb_ports) {
2125                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2126                 return (-ENODEV);
2127         }
2128
2129         dev = &rte_eth_devices[port_id];
2130         rte_eth_dev_info_get(port_id, &dev_info);
2131
2132         num_vfs = dev_info.max_vfs;
2133         if (vf > num_vfs)
2134         {
2135                 PMD_DEBUG_TRACE("port %d: invalid vf id\n", port_id);
2136                 return (-EINVAL);
2137         }
2138
2139         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP);
2140         return (*dev->dev_ops->set_vf_rx)(dev, vf,on);
2141 }
2142
2143 int
2144 rte_eth_dev_set_vf_tx(uint8_t port_id,uint16_t vf, uint8_t on)
2145 {
2146         uint16_t num_vfs;
2147         struct rte_eth_dev *dev;
2148         struct rte_eth_dev_info dev_info;
2149
2150         if (port_id >= nb_ports) {
2151                 PMD_DEBUG_TRACE("set pool tx:Invalid port_id=%d\n", port_id);
2152                 return (-ENODEV);
2153         }
2154
2155         dev = &rte_eth_devices[port_id];
2156         rte_eth_dev_info_get(port_id, &dev_info);
2157
2158         num_vfs = dev_info.max_vfs;
2159         if (vf > num_vfs)
2160         {
2161                 PMD_DEBUG_TRACE("set pool tx:invalid pool id=%d\n", vf);
2162                 return (-EINVAL);
2163         }
2164
2165         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP);
2166         return (*dev->dev_ops->set_vf_tx)(dev, vf,on);
2167 }
2168
2169 int
2170 rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id,
2171                                  uint64_t vf_mask,uint8_t vlan_on)
2172 {
2173         struct rte_eth_dev *dev;
2174
2175         if (port_id >= nb_ports) {
2176                 PMD_DEBUG_TRACE("VF VLAN filter:invalid port id=%d\n",
2177                                 port_id);
2178                 return (-ENODEV);
2179         }
2180         dev = &rte_eth_devices[port_id];
2181
2182         if(vlan_id > ETHER_MAX_VLAN_ID)
2183         {
2184                 PMD_DEBUG_TRACE("VF VLAN filter:invalid VLAN id=%d\n",
2185                         vlan_id);
2186                 return (-EINVAL);
2187         }
2188         if (vf_mask == 0)
2189         {
2190                 PMD_DEBUG_TRACE("VF VLAN filter:pool_mask can not be 0\n");
2191                 return (-EINVAL);
2192         }
2193
2194         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP);
2195         return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id,
2196                                                 vf_mask,vlan_on);
2197 }
2198
2199 int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
2200                                         uint16_t tx_rate)
2201 {
2202         struct rte_eth_dev *dev;
2203         struct rte_eth_dev_info dev_info;
2204         struct rte_eth_link link;
2205
2206         if (port_id >= nb_ports) {
2207                 PMD_DEBUG_TRACE("set queue rate limit:invalid port id=%d\n",
2208                                 port_id);
2209                 return -ENODEV;
2210         }
2211
2212         dev = &rte_eth_devices[port_id];
2213         rte_eth_dev_info_get(port_id, &dev_info);
2214         link = dev->data->dev_link;
2215
2216         if (queue_idx > dev_info.max_tx_queues) {
2217                 PMD_DEBUG_TRACE("set queue rate limit:port %d: "
2218                                 "invalid queue id=%d\n", port_id, queue_idx);
2219                 return -EINVAL;
2220         }
2221
2222         if (tx_rate > link.link_speed) {
2223                 PMD_DEBUG_TRACE("set queue rate limit:invalid tx_rate=%d, "
2224                                 "bigger than link speed= %d\n",
2225                         tx_rate, link_speed);
2226                 return -EINVAL;
2227         }
2228
2229         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
2230         return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate);
2231 }
2232
2233 int rte_eth_set_vf_rate_limit(uint8_t port_id, uint16_t vf, uint16_t tx_rate,
2234                                 uint64_t q_msk)
2235 {
2236         struct rte_eth_dev *dev;
2237         struct rte_eth_dev_info dev_info;
2238         struct rte_eth_link link;
2239
2240         if (q_msk == 0)
2241                 return 0;
2242
2243         if (port_id >= nb_ports) {
2244                 PMD_DEBUG_TRACE("set VF rate limit:invalid port id=%d\n",
2245                                 port_id);
2246                 return -ENODEV;
2247         }
2248
2249         dev = &rte_eth_devices[port_id];
2250         rte_eth_dev_info_get(port_id, &dev_info);
2251         link = dev->data->dev_link;
2252
2253         if (vf > dev_info.max_vfs) {
2254                 PMD_DEBUG_TRACE("set VF rate limit:port %d: "
2255                                 "invalid vf id=%d\n", port_id, vf);
2256                 return -EINVAL;
2257         }
2258
2259         if (tx_rate > link.link_speed) {
2260                 PMD_DEBUG_TRACE("set VF rate limit:invalid tx_rate=%d, "
2261                                 "bigger than link speed= %d\n",
2262                                 tx_rate, link_speed);
2263                 return -EINVAL;
2264         }
2265
2266         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rate_limit, -ENOTSUP);
2267         return (*dev->dev_ops->set_vf_rate_limit)(dev, vf, tx_rate, q_msk);
2268 }
2269
2270 int
2271 rte_eth_mirror_rule_set(uint8_t port_id,
2272                         struct rte_eth_vmdq_mirror_conf *mirror_conf,
2273                         uint8_t rule_id, uint8_t on)
2274 {
2275         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2276
2277         if (port_id >= nb_ports) {
2278                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2279                 return (-ENODEV);
2280         }
2281
2282         if (mirror_conf->rule_type_mask == 0) {
2283                 PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
2284                 return (-EINVAL);
2285         }
2286
2287         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
2288                 PMD_DEBUG_TRACE("Invalid dst pool, pool id must"
2289                         "be 0-%d\n",ETH_64_POOLS - 1);
2290                 return (-EINVAL);
2291         }
2292
2293         if ((mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) &&
2294                 (mirror_conf->pool_mask == 0)) {
2295                 PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not"
2296                                 "be 0.\n");
2297                 return (-EINVAL);
2298         }
2299
2300         if(rule_id >= ETH_VMDQ_NUM_MIRROR_RULE)
2301         {
2302                 PMD_DEBUG_TRACE("Invalid rule_id, rule_id must be 0-%d\n",
2303                         ETH_VMDQ_NUM_MIRROR_RULE - 1);
2304                 return (-EINVAL);
2305         }
2306
2307         dev = &rte_eth_devices[port_id];
2308         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
2309
2310         return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
2311 }
2312
2313 int
2314 rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
2315 {
2316         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2317
2318         if (port_id >= nb_ports) {
2319                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2320                 return (-ENODEV);
2321         }
2322
2323         if(rule_id >= ETH_VMDQ_NUM_MIRROR_RULE)
2324         {
2325                 PMD_DEBUG_TRACE("Invalid rule_id, rule_id must be 0-%d\n",
2326                         ETH_VMDQ_NUM_MIRROR_RULE-1);
2327                 return (-EINVAL);
2328         }
2329
2330         dev = &rte_eth_devices[port_id];
2331         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
2332
2333         return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
2334 }
2335
2336 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2337 uint16_t
2338 rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
2339                  struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
2340 {
2341         struct rte_eth_dev *dev;
2342
2343         if (port_id >= nb_ports) {
2344                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2345                 return 0;
2346         }
2347         dev = &rte_eth_devices[port_id];
2348         FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, -ENOTSUP);
2349         if (queue_id >= dev->data->nb_rx_queues) {
2350                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
2351                 return 0;
2352         }
2353         return (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
2354                                                 rx_pkts, nb_pkts);
2355 }
2356
2357 uint16_t
2358 rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
2359                  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2360 {
2361         struct rte_eth_dev *dev;
2362
2363         if (port_id >= nb_ports) {
2364                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2365                 return 0;
2366         }
2367         dev = &rte_eth_devices[port_id];
2368
2369         FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, -ENOTSUP);
2370         if (queue_id >= dev->data->nb_tx_queues) {
2371                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
2372                 return 0;
2373         }
2374         return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id],
2375                                                 tx_pkts, nb_pkts);
2376 }
2377
/*
 * Debug (non-inline) variant: return the number of used descriptors on an
 * RX queue, or 0 for an invalid port.
 * NOTE(review): FUNC_PTR_OR_ERR_RET returns -ENOTSUP here, which this
 * uint32_t-returning function hands back as a huge positive count that
 * callers cannot distinguish from a real value — worth confirming/fixing.
 */
uint32_t
rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return 0;
        }
        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, -ENOTSUP);
        return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
}
2391
2392 int
2393 rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
2394 {
2395         struct rte_eth_dev *dev;
2396
2397         if (port_id >= nb_ports) {
2398                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2399                 return (-ENODEV);
2400         }
2401         dev = &rte_eth_devices[port_id];
2402         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
2403         return (*dev->dev_ops->rx_descriptor_done)( \
2404                 dev->data->rx_queues[queue_id], offset);
2405 }
2406 #endif
2407
/*
 * Register cb_fn to be invoked with cb_arg when 'event' occurs on a port.
 * Registering the same (fn, arg, event) triple twice is a successful
 * no-op. Returns 0 on success, -EINVAL on bad arguments, or -ENOMEM.
 */
int
rte_eth_dev_callback_register(uint8_t port_id,
                        enum rte_eth_event_type event,
                        rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_callback *user_cb;

        if (!cb_fn)
                return (-EINVAL);
        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-EINVAL);
        }

        /* The callback list is shared with the interrupt thread; all list
         * manipulation happens under rte_eth_dev_cb_lock. */
        dev = &rte_eth_devices[port_id];
        rte_spinlock_lock(&rte_eth_dev_cb_lock);

        /* Look for an identical registration; user_cb is NULL if none. */
        TAILQ_FOREACH(user_cb, &(dev->callbacks), next) {
                if (user_cb->cb_fn == cb_fn &&
                        user_cb->cb_arg == cb_arg &&
                        user_cb->event == event) {
                        break;
                }
        }

        /* create a new callback. */
        if (user_cb == NULL && (user_cb = rte_zmalloc("INTR_USER_CALLBACK",
                        sizeof(struct rte_eth_dev_callback), 0)) != NULL) {
                user_cb->cb_fn = cb_fn;
                user_cb->cb_arg = cb_arg;
                user_cb->event = event;
                TAILQ_INSERT_TAIL(&(dev->callbacks), user_cb, next);
        }

        rte_spinlock_unlock(&rte_eth_dev_cb_lock);
        /* user_cb is NULL only when allocation of a new entry failed. */
        return ((user_cb == NULL) ? -ENOMEM : 0);
}
2446
/*
 * Unregister callbacks matching (cb_fn, event, cb_arg) on a port; a
 * cb_arg of (void *)-1 acts as a wildcard matching any argument.
 * Returns 0 on success, -EINVAL on bad arguments, or -EAGAIN when a
 * matching callback was executing and could not be removed.
 */
int
rte_eth_dev_callback_unregister(uint8_t port_id,
                        enum rte_eth_event_type event,
                        rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
        int ret;
        struct rte_eth_dev *dev;
        struct rte_eth_dev_callback *cb, *next;

        if (!cb_fn)
                return (-EINVAL);
        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-EINVAL);
        }

        dev = &rte_eth_devices[port_id];
        rte_spinlock_lock(&rte_eth_dev_cb_lock);

        ret = 0;
        /* 'next' is captured before a possible removal frees 'cb'. */
        for (cb = TAILQ_FIRST(&dev->callbacks); cb != NULL; cb = next) {

                next = TAILQ_NEXT(cb, next);

                /* Skip entries that do not match; (void *)-1 matches any arg. */
                if (cb->cb_fn != cb_fn || cb->event != event ||
                                (cb->cb_arg != (void *)-1 &&
                                cb->cb_arg != cb_arg))
                        continue;

                /*
                 * if this callback is not executing right now,
                 * then remove it.
                 */
                if (cb->active == 0) {
                        TAILQ_REMOVE(&(dev->callbacks), cb, next);
                        rte_free(cb);
                } else {
                        ret = -EAGAIN;
                }
        }

        rte_spinlock_unlock(&rte_eth_dev_cb_lock);
        return (ret);
}
2491
/*
 * Invoke every callback registered for 'event' on the given device.
 * Internal helper called by PMDs when the event occurs.
 */
void
_rte_eth_dev_callback_process(struct rte_eth_dev *dev,
        enum rte_eth_event_type event)
{
        struct rte_eth_dev_callback *cb_lst;
        struct rte_eth_dev_callback dev_cb;

        rte_spinlock_lock(&rte_eth_dev_cb_lock);
        TAILQ_FOREACH(cb_lst, &(dev->callbacks), next) {
                if (cb_lst->cb_fn == NULL || cb_lst->event != event)
                        continue;
                /* Copy the entry and mark it active, then drop the lock
                 * while running user code so the callback may itself call
                 * into the register/unregister API without deadlocking;
                 * 'active' keeps unregister from freeing the entry. */
                dev_cb = *cb_lst;
                cb_lst->active = 1;
                rte_spinlock_unlock(&rte_eth_dev_cb_lock);
                dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
                                                dev_cb.cb_arg);
                rte_spinlock_lock(&rte_eth_dev_cb_lock);
                cb_lst->active = 0;
        }
        rte_spinlock_unlock(&rte_eth_dev_cb_lock);
}
2513 #ifdef RTE_NIC_BYPASS
2514 int rte_eth_dev_bypass_init(uint8_t port_id)
2515 {
2516         struct rte_eth_dev *dev;
2517
2518         if (port_id >= nb_ports) {
2519                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2520                 return (-ENODEV);
2521         }
2522
2523         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2524                 PMD_DEBUG_TRACE("Invalid port device\n");
2525                 return (-ENODEV);
2526         }
2527
2528         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP);
2529         (*dev->dev_ops->bypass_init)(dev);
2530         return 0;
2531 }
2532
2533 int
2534 rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state)
2535 {
2536         struct rte_eth_dev *dev;
2537
2538         if (port_id >= nb_ports) {
2539                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2540                 return (-ENODEV);
2541         }
2542
2543         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2544                 PMD_DEBUG_TRACE("Invalid port device\n");
2545                 return (-ENODEV);
2546         }
2547         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
2548         (*dev->dev_ops->bypass_state_show)(dev, state);
2549         return 0;
2550 }
2551
2552 int
2553 rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state)
2554 {
2555         struct rte_eth_dev *dev;
2556
2557         if (port_id >= nb_ports) {
2558                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2559                 return (-ENODEV);
2560         }
2561
2562         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2563                 PMD_DEBUG_TRACE("Invalid port device\n");
2564                 return (-ENODEV);
2565         }
2566
2567         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP);
2568         (*dev->dev_ops->bypass_state_set)(dev, new_state);
2569         return 0;
2570 }
2571
2572 int
2573 rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state)
2574 {
2575         struct rte_eth_dev *dev;
2576
2577         if (port_id >= nb_ports) {
2578                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2579                 return (-ENODEV);
2580         }
2581
2582         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2583                 PMD_DEBUG_TRACE("Invalid port device\n");
2584                 return (-ENODEV);
2585         }
2586
2587         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
2588         (*dev->dev_ops->bypass_event_show)(dev, event, state);
2589         return 0;
2590 }
2591
2592 int
2593 rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state)
2594 {
2595         struct rte_eth_dev *dev;
2596
2597         if (port_id >= nb_ports) {
2598                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2599                 return (-ENODEV);
2600         }
2601
2602         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2603                 PMD_DEBUG_TRACE("Invalid port device\n");
2604                 return (-ENODEV);
2605         }
2606
2607         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP);
2608         (*dev->dev_ops->bypass_event_set)(dev, event, state);
2609         return 0;
2610 }
2611
2612 int
2613 rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout)
2614 {
2615         struct rte_eth_dev *dev;
2616
2617         if (port_id >= nb_ports) {
2618                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2619                 return (-ENODEV);
2620         }
2621
2622         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2623                 PMD_DEBUG_TRACE("Invalid port device\n");
2624                 return (-ENODEV);
2625         }
2626
2627         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP);
2628         (*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout);
2629         return 0;
2630 }
2631
2632 int
2633 rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver)
2634 {
2635         struct rte_eth_dev *dev;
2636
2637         if (port_id >= nb_ports) {
2638                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2639                 return (-ENODEV);
2640         }
2641
2642         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2643                 PMD_DEBUG_TRACE("Invalid port device\n");
2644                 return (-ENODEV);
2645         }
2646
2647         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP);
2648         (*dev->dev_ops->bypass_ver_show)(dev, ver);
2649         return 0;
2650 }
2651
2652 int
2653 rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
2654 {
2655         struct rte_eth_dev *dev;
2656
2657         if (port_id >= nb_ports) {
2658                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2659                 return (-ENODEV);
2660         }
2661
2662         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2663                 PMD_DEBUG_TRACE("Invalid port device\n");
2664                 return (-ENODEV);
2665         }
2666
2667         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP);
2668         (*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout);
2669         return 0;
2670 }
2671
2672 int
2673 rte_eth_dev_bypass_wd_reset(uint8_t port_id)
2674 {
2675         struct rte_eth_dev *dev;
2676
2677         if (port_id >= nb_ports) {
2678                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2679                 return (-ENODEV);
2680         }
2681
2682         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2683                 PMD_DEBUG_TRACE("Invalid port device\n");
2684                 return (-ENODEV);
2685         }
2686
2687         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP);
2688         (*dev->dev_ops->bypass_wd_reset)(dev);
2689         return 0;
2690 }
2691 #endif
2692
2693 int
2694 rte_eth_dev_add_syn_filter(uint8_t port_id,
2695                         struct rte_syn_filter *filter, uint16_t rx_queue)
2696 {
2697         struct rte_eth_dev *dev;
2698
2699         if (port_id >= nb_ports) {
2700                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2701                 return -ENODEV;
2702         }
2703
2704         dev = &rte_eth_devices[port_id];
2705         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->add_syn_filter, -ENOTSUP);
2706         return (*dev->dev_ops->add_syn_filter)(dev, filter, rx_queue);
2707 }
2708
2709 int
2710 rte_eth_dev_remove_syn_filter(uint8_t port_id)
2711 {
2712         struct rte_eth_dev *dev;
2713
2714         if (port_id >= nb_ports) {
2715                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2716                 return -ENODEV;
2717         }
2718
2719         dev = &rte_eth_devices[port_id];
2720         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->remove_syn_filter, -ENOTSUP);
2721         return (*dev->dev_ops->remove_syn_filter)(dev);
2722 }
2723
2724 int
2725 rte_eth_dev_get_syn_filter(uint8_t port_id,
2726                         struct rte_syn_filter *filter, uint16_t *rx_queue)
2727 {
2728         struct rte_eth_dev *dev;
2729
2730         if (filter == NULL || rx_queue == NULL)
2731                 return -EINVAL;
2732
2733         if (port_id >= nb_ports) {
2734                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2735                 return -ENODEV;
2736         }
2737
2738         dev = &rte_eth_devices[port_id];
2739         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_syn_filter, -ENOTSUP);
2740         return (*dev->dev_ops->get_syn_filter)(dev, filter, rx_queue);
2741 }
2742
2743 int
2744 rte_eth_dev_add_ethertype_filter(uint8_t port_id, uint16_t index,
2745                         struct rte_ethertype_filter *filter, uint16_t rx_queue)
2746 {
2747         struct rte_eth_dev *dev;
2748
2749         if (port_id >= nb_ports) {
2750                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2751                 return -ENODEV;
2752         }
2753         if (filter->ethertype == ETHER_TYPE_IPv4 ||
2754                 filter->ethertype == ETHER_TYPE_IPv6){
2755                 PMD_DEBUG_TRACE("IP and IPv6 are not supported"
2756                         " in ethertype filter\n");
2757                 return -EINVAL;
2758         }
2759         dev = &rte_eth_devices[port_id];
2760         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->add_ethertype_filter, -ENOTSUP);
2761         return (*dev->dev_ops->add_ethertype_filter)(dev, index,
2762                                         filter, rx_queue);
2763 }
2764
2765 int
2766 rte_eth_dev_remove_ethertype_filter(uint8_t port_id,  uint16_t index)
2767 {
2768         struct rte_eth_dev *dev;
2769
2770         if (port_id >= nb_ports) {
2771                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2772                 return -ENODEV;
2773         }
2774
2775         dev = &rte_eth_devices[port_id];
2776         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->remove_ethertype_filter, -ENOTSUP);
2777         return (*dev->dev_ops->remove_ethertype_filter)(dev, index);
2778 }
2779
2780 int
2781 rte_eth_dev_get_ethertype_filter(uint8_t port_id, uint16_t index,
2782                         struct rte_ethertype_filter *filter, uint16_t *rx_queue)
2783 {
2784         struct rte_eth_dev *dev;
2785
2786         if (filter == NULL || rx_queue == NULL)
2787                 return -EINVAL;
2788
2789         if (port_id >= nb_ports) {
2790                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2791                 return -ENODEV;
2792         }
2793
2794         dev = &rte_eth_devices[port_id];
2795         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_ethertype_filter, -ENOTSUP);
2796         return (*dev->dev_ops->get_ethertype_filter)(dev, index,
2797                                                 filter, rx_queue);
2798 }
2799
2800 int
2801 rte_eth_dev_add_2tuple_filter(uint8_t port_id, uint16_t index,
2802                         struct rte_2tuple_filter *filter, uint16_t rx_queue)
2803 {
2804         struct rte_eth_dev *dev;
2805
2806         if (port_id >= nb_ports) {
2807                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2808                 return -ENODEV;
2809         }
2810         if (filter->protocol != IPPROTO_TCP &&
2811                 filter->tcp_flags != 0){
2812                 PMD_DEBUG_TRACE("tcp flags is 0x%x, but the protocol value"
2813                         " is not TCP\n",
2814                         filter->tcp_flags);
2815                 return -EINVAL;
2816         }
2817
2818         dev = &rte_eth_devices[port_id];
2819         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->add_2tuple_filter, -ENOTSUP);
2820         return (*dev->dev_ops->add_2tuple_filter)(dev, index, filter, rx_queue);
2821 }
2822
2823 int
2824 rte_eth_dev_remove_2tuple_filter(uint8_t port_id, uint16_t index)
2825 {
2826         struct rte_eth_dev *dev;
2827
2828         if (port_id >= nb_ports) {
2829                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2830                 return -ENODEV;
2831         }
2832
2833         dev = &rte_eth_devices[port_id];
2834         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->remove_2tuple_filter, -ENOTSUP);
2835         return (*dev->dev_ops->remove_2tuple_filter)(dev, index);
2836 }
2837
2838 int
2839 rte_eth_dev_get_2tuple_filter(uint8_t port_id, uint16_t index,
2840                         struct rte_2tuple_filter *filter, uint16_t *rx_queue)
2841 {
2842         struct rte_eth_dev *dev;
2843
2844         if (filter == NULL || rx_queue == NULL)
2845                 return -EINVAL;
2846
2847         if (port_id >= nb_ports) {
2848                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2849                 return -ENODEV;
2850         }
2851
2852         dev = &rte_eth_devices[port_id];
2853         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_2tuple_filter, -ENOTSUP);
2854         return (*dev->dev_ops->get_2tuple_filter)(dev, index, filter, rx_queue);
2855 }
2856
2857 int
2858 rte_eth_dev_add_5tuple_filter(uint8_t port_id, uint16_t index,
2859                         struct rte_5tuple_filter *filter, uint16_t rx_queue)
2860 {
2861         struct rte_eth_dev *dev;
2862
2863         if (port_id >= nb_ports) {
2864                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2865                 return -ENODEV;
2866         }
2867
2868         if (filter->protocol != IPPROTO_TCP &&
2869                 filter->tcp_flags != 0){
2870                 PMD_DEBUG_TRACE("tcp flags is 0x%x, but the protocol value"
2871                         " is not TCP\n",
2872                         filter->tcp_flags);
2873                 return -EINVAL;
2874         }
2875
2876         dev = &rte_eth_devices[port_id];
2877         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->add_5tuple_filter, -ENOTSUP);
2878         return (*dev->dev_ops->add_5tuple_filter)(dev, index, filter, rx_queue);
2879 }
2880
2881 int
2882 rte_eth_dev_remove_5tuple_filter(uint8_t port_id, uint16_t index)
2883 {
2884         struct rte_eth_dev *dev;
2885
2886         if (port_id >= nb_ports) {
2887                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2888                 return -ENODEV;
2889         }
2890
2891         dev = &rte_eth_devices[port_id];
2892         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->remove_5tuple_filter, -ENOTSUP);
2893         return (*dev->dev_ops->remove_5tuple_filter)(dev, index);
2894 }
2895
2896 int
2897 rte_eth_dev_get_5tuple_filter(uint8_t port_id, uint16_t index,
2898                         struct rte_5tuple_filter *filter, uint16_t *rx_queue)
2899 {
2900         struct rte_eth_dev *dev;
2901
2902         if (filter == NULL || rx_queue == NULL)
2903                 return -EINVAL;
2904
2905         if (port_id >= nb_ports) {
2906                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2907                 return -ENODEV;
2908         }
2909
2910         dev = &rte_eth_devices[port_id];
2911         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_5tuple_filter, -ENOTSUP);
2912         return (*dev->dev_ops->get_5tuple_filter)(dev, index, filter,
2913                                                 rx_queue);
2914 }
2915
2916 int
2917 rte_eth_dev_add_flex_filter(uint8_t port_id, uint16_t index,
2918                         struct rte_flex_filter *filter, uint16_t rx_queue)
2919 {
2920         struct rte_eth_dev *dev;
2921
2922         if (port_id >= nb_ports) {
2923                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2924                 return -ENODEV;
2925         }
2926
2927         dev = &rte_eth_devices[port_id];
2928         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->add_flex_filter, -ENOTSUP);
2929         return (*dev->dev_ops->add_flex_filter)(dev, index, filter, rx_queue);
2930 }
2931
2932 int
2933 rte_eth_dev_remove_flex_filter(uint8_t port_id, uint16_t index)
2934 {
2935         struct rte_eth_dev *dev;
2936
2937         if (port_id >= nb_ports) {
2938                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2939                 return -ENODEV;
2940         }
2941
2942         dev = &rte_eth_devices[port_id];
2943         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->remove_flex_filter, -ENOTSUP);
2944         return (*dev->dev_ops->remove_flex_filter)(dev, index);
2945 }
2946
2947 int
2948 rte_eth_dev_get_flex_filter(uint8_t port_id, uint16_t index,
2949                         struct rte_flex_filter *filter, uint16_t *rx_queue)
2950 {
2951         struct rte_eth_dev *dev;
2952
2953         if (filter == NULL || rx_queue == NULL)
2954                 return -EINVAL;
2955
2956         if (port_id >= nb_ports) {
2957                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2958                 return -ENODEV;
2959         }
2960
2961         dev = &rte_eth_devices[port_id];
2962         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_flex_filter, -ENOTSUP);
2963         return (*dev->dev_ops->get_flex_filter)(dev, index, filter,
2964                                                 rx_queue);
2965 }