ethdev: add filters
[dpdk.git] / lib / librte_ether / rte_ethdev.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/types.h>
35 #include <sys/queue.h>
36 #include <ctype.h>
37 #include <stdio.h>
38 #include <stdlib.h>
39 #include <string.h>
40 #include <stdarg.h>
41 #include <errno.h>
42 #include <stdint.h>
43 #include <inttypes.h>
44 #include <netinet/in.h>
45
46 #include <rte_byteorder.h>
47 #include <rte_log.h>
48 #include <rte_debug.h>
49 #include <rte_interrupts.h>
50 #include <rte_pci.h>
51 #include <rte_memory.h>
52 #include <rte_memcpy.h>
53 #include <rte_memzone.h>
54 #include <rte_launch.h>
55 #include <rte_tailq.h>
56 #include <rte_eal.h>
57 #include <rte_per_lcore.h>
58 #include <rte_lcore.h>
59 #include <rte_atomic.h>
60 #include <rte_branch_prediction.h>
61 #include <rte_common.h>
62 #include <rte_ring.h>
63 #include <rte_mempool.h>
64 #include <rte_malloc.h>
65 #include <rte_mbuf.h>
66 #include <rte_errno.h>
67 #include <rte_spinlock.h>
68
69 #include "rte_ether.h"
70 #include "rte_ethdev.h"
71
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
/* Log an error-level message prefixed with the calling function's name.
 * Compiled in only when ethdev debugging is enabled. */
#define PMD_DEBUG_TRACE(fmt, args...) do {                        \
		RTE_LOG(ERR, PMD, "%s: " fmt, __func__, ## args); \
	} while (0)
#else
/* No-op when ethdev debugging is disabled. */
#define PMD_DEBUG_TRACE(fmt, args...)
#endif

/* Macros for checking for restricting functions to primary instance only.
 * Device configuration entry points mutate shared state and must therefore
 * run in the primary process; secondaries bail out with an error (or void). */
#define PROC_PRIMARY_OR_ERR_RET(retval) do { \
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
		PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
		return (retval); \
	} \
} while(0)
#define PROC_PRIMARY_OR_RET() do { \
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
		PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
		return; \
	} \
} while(0)

/* Macros to check for invalid function pointers in dev_ops structure.
 * A NULL op means the PMD does not implement the feature: return -ENOTSUP
 * (or void) instead of dereferencing it. */
#define FUNC_PTR_OR_ERR_RET(func, retval) do { \
	if ((func) == NULL) { \
		PMD_DEBUG_TRACE("Function not supported\n"); \
		return (retval); \
	} \
} while(0)
#define FUNC_PTR_OR_RET(func) do { \
	if ((func) == NULL) { \
		PMD_DEBUG_TRACE("Function not supported\n"); \
		return; \
	} \
} while(0)
107
/* Name of the memzone holding the rte_eth_dev_data array shared between
 * primary and secondary processes. */
static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
/* Per-process array of ethdev structures; data fields point into the
 * shared memzone. */
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
/* Shared device data; mapped lazily by rte_eth_dev_data_alloc(). */
static struct rte_eth_dev_data *rte_eth_dev_data = NULL;
/* Number of ports allocated so far in this process. */
static uint8_t nb_ports = 0;

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/**
 * The user application callback description.
 *
 * It contains callback address to be registered by user application,
 * the pointer to the parameters for callback, and the event type.
 */
struct rte_eth_dev_callback {
	TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
	rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
	void *cb_arg;                           /**< Parameter for callback */
	enum rte_eth_event_type event;          /**< Interrupt event type */
	uint32_t active;                        /**< Callback is executing */
};

/* Tags distinguishing TX from RX entries in queue-statistics mappings. */
enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};
134
135 static inline void
136 rte_eth_dev_data_alloc(void)
137 {
138         const unsigned flags = 0;
139         const struct rte_memzone *mz;
140
141         if (rte_eal_process_type() == RTE_PROC_PRIMARY){
142                 mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
143                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
144                                 rte_socket_id(), flags);
145         } else
146                 mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
147         if (mz == NULL)
148                 rte_panic("Cannot allocate memzone for ethernet port data\n");
149
150         rte_eth_dev_data = mz->addr;
151         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
152                 memset(rte_eth_dev_data, 0,
153                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
154 }
155
156 struct rte_eth_dev *
157 rte_eth_dev_allocate(void)
158 {
159         struct rte_eth_dev *eth_dev;
160
161         if (nb_ports == RTE_MAX_ETHPORTS) {
162                 PMD_DEBUG_TRACE("Reached maximum number of ethernet ports\n");
163                 return NULL;
164         }
165
166         if (rte_eth_dev_data == NULL)
167                 rte_eth_dev_data_alloc();
168
169         eth_dev = &rte_eth_devices[nb_ports];
170         eth_dev->data = &rte_eth_dev_data[nb_ports];
171         eth_dev->data->port_id = nb_ports++;
172         return eth_dev;
173 }
174
175 static int
176 rte_eth_dev_init(struct rte_pci_driver *pci_drv,
177                  struct rte_pci_device *pci_dev)
178 {
179         struct eth_driver    *eth_drv;
180         struct rte_eth_dev *eth_dev;
181         int diag;
182
183         eth_drv = (struct eth_driver *)pci_drv;
184
185         eth_dev = rte_eth_dev_allocate();
186         if (eth_dev == NULL)
187                 return -ENOMEM;
188
189         if (rte_eal_process_type() == RTE_PROC_PRIMARY){
190                 eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
191                                   eth_drv->dev_private_size,
192                                   CACHE_LINE_SIZE);
193                 if (eth_dev->data->dev_private == NULL)
194                         rte_panic("Cannot allocate memzone for private port data\n");
195         }
196         eth_dev->pci_dev = pci_dev;
197         eth_dev->driver = eth_drv;
198         eth_dev->data->rx_mbuf_alloc_failed = 0;
199
200         /* init user callbacks */
201         TAILQ_INIT(&(eth_dev->callbacks));
202
203         /*
204          * Set the default maximum frame size.
205          */
206         eth_dev->data->max_frame_size = ETHER_MAX_LEN;
207
208         /* Invoke PMD device initialization function */
209         diag = (*eth_drv->eth_dev_init)(eth_drv, eth_dev);
210         if (diag == 0)
211                 return (0);
212
213         PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%u device_id=0x%x)"
214                         " failed\n", pci_drv->name,
215                         (unsigned) pci_dev->id.vendor_id,
216                         (unsigned) pci_dev->id.device_id);
217         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
218                 rte_free(eth_dev->data->dev_private);
219         nb_ports--;
220         return diag;
221 }
222
223 /**
224  * Register an Ethernet [Poll Mode] driver.
225  *
226  * Function invoked by the initialization function of an Ethernet driver
227  * to simultaneously register itself as a PCI driver and as an Ethernet
228  * Poll Mode Driver.
229  * Invokes the rte_eal_pci_register() function to register the *pci_drv*
230  * structure embedded in the *eth_drv* structure, after having stored the
231  * address of the rte_eth_dev_init() function in the *devinit* field of
232  * the *pci_drv* structure.
233  * During the PCI probing phase, the rte_eth_dev_init() function is
234  * invoked for each PCI [Ethernet device] matching the embedded PCI
235  * identifiers provided by the driver.
236  */
237 void
238 rte_eth_driver_register(struct eth_driver *eth_drv)
239 {
240         eth_drv->pci_drv.devinit = rte_eth_dev_init;
241         rte_eal_pci_register(&eth_drv->pci_drv);
242 }
243
244 int
245 rte_eth_dev_socket_id(uint8_t port_id)
246 {
247         if (port_id >= nb_ports)
248                 return -1;
249         return rte_eth_devices[port_id].pci_dev->numa_node;
250 }
251
252 uint8_t
253 rte_eth_dev_count(void)
254 {
255         return (nb_ports);
256 }
257
258 static int
259 rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
260 {
261         uint16_t old_nb_queues = dev->data->nb_rx_queues;
262         void **rxq;
263         unsigned i;
264
265         if (dev->data->rx_queues == NULL) { /* first time configuration */
266                 dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
267                                 sizeof(dev->data->rx_queues[0]) * nb_queues,
268                                 CACHE_LINE_SIZE);
269                 if (dev->data->rx_queues == NULL) {
270                         dev->data->nb_rx_queues = 0;
271                         return -(ENOMEM);
272                 }
273         } else { /* re-configure */
274                 FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
275
276                 rxq = dev->data->rx_queues;
277
278                 for (i = nb_queues; i < old_nb_queues; i++)
279                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
280                 rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
281                                 CACHE_LINE_SIZE);
282                 if (rxq == NULL)
283                         return -(ENOMEM);
284
285                 if (nb_queues > old_nb_queues)
286                         memset(rxq + old_nb_queues, 0,
287                                 sizeof(rxq[0]) * (nb_queues - old_nb_queues));
288
289                 dev->data->rx_queues = rxq;
290
291         }
292         dev->data->nb_rx_queues = nb_queues;
293         return (0);
294 }
295
296 int
297 rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
298 {
299         struct rte_eth_dev *dev;
300
301         /* This function is only safe when called from the primary process
302          * in a multi-process setup*/
303         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
304
305         if (port_id >= nb_ports) {
306                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
307                 return -EINVAL;
308         }
309
310         dev = &rte_eth_devices[port_id];
311         if (rx_queue_id >= dev->data->nb_rx_queues) {
312                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
313                 return -EINVAL;
314         }
315
316         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
317
318         return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
319
320 }
321
322 int
323 rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
324 {
325         struct rte_eth_dev *dev;
326
327         /* This function is only safe when called from the primary process
328          * in a multi-process setup*/
329         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
330
331         if (port_id >= nb_ports) {
332                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
333                 return -EINVAL;
334         }
335
336         dev = &rte_eth_devices[port_id];
337         if (rx_queue_id >= dev->data->nb_rx_queues) {
338                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
339                 return -EINVAL;
340         }
341
342         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
343
344         return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
345
346 }
347
348 int
349 rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
350 {
351         struct rte_eth_dev *dev;
352
353         /* This function is only safe when called from the primary process
354          * in a multi-process setup*/
355         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
356
357         if (port_id >= nb_ports) {
358                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
359                 return -EINVAL;
360         }
361
362         dev = &rte_eth_devices[port_id];
363         if (tx_queue_id >= dev->data->nb_tx_queues) {
364                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
365                 return -EINVAL;
366         }
367
368         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
369
370         return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
371
372 }
373
374 int
375 rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
376 {
377         struct rte_eth_dev *dev;
378
379         /* This function is only safe when called from the primary process
380          * in a multi-process setup*/
381         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
382
383         if (port_id >= nb_ports) {
384                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
385                 return -EINVAL;
386         }
387
388         dev = &rte_eth_devices[port_id];
389         if (tx_queue_id >= dev->data->nb_tx_queues) {
390                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
391                 return -EINVAL;
392         }
393
394         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
395
396         return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
397
398 }
399
400 static int
401 rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
402 {
403         uint16_t old_nb_queues = dev->data->nb_tx_queues;
404         void **txq;
405         unsigned i;
406
407         if (dev->data->tx_queues == NULL) { /* first time configuration */
408                 dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
409                                 sizeof(dev->data->tx_queues[0]) * nb_queues,
410                                 CACHE_LINE_SIZE);
411                 if (dev->data->tx_queues == NULL) {
412                         dev->data->nb_tx_queues = 0;
413                         return -(ENOMEM);
414                 }
415         } else { /* re-configure */
416                 FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
417
418                 txq = dev->data->tx_queues;
419
420                 for (i = nb_queues; i < old_nb_queues; i++)
421                         (*dev->dev_ops->tx_queue_release)(txq[i]);
422                 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
423                                 CACHE_LINE_SIZE);
424                 if (txq == NULL)
425                         return -(ENOMEM);
426
427                 if (nb_queues > old_nb_queues)
428                         memset(txq + old_nb_queues, 0,
429                                 sizeof(txq[0]) * (nb_queues - old_nb_queues));
430
431                 dev->data->tx_queues = txq;
432
433         }
434         dev->data->nb_tx_queues = nb_queues;
435         return (0);
436 }
437
/*
 * Validate the requested multi-queue RX/TX modes and queue counts against
 * the device's capabilities, and normalize dev->data->dev_conf for SRIOV.
 *
 * With SRIOV active: RSS/DCB modes are rejected, unimplemented VMDQ
 * combinations are rejected, otherwise the stored mq_mode is forced to
 * VMDQ-only and nb_q_per_pool capped to 1; queue counts must not exceed
 * the per-pool limit. Without SRIOV: VMDQ+DCB and plain DCB modes must
 * use the fixed queue counts and a valid pool/TC count.
 *
 * NOTE(review): this mutates dev->data->dev_conf even though it only
 * receives dev_conf as const — callers rely on rte_eth_dev_configure()
 * having already copied dev_conf into dev->data.
 *
 * @return 0 when the configuration is acceptable, -EINVAL otherwise.
 */
static int
rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];

	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
		/* check multi-queue mode */
		if ((dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ||
		    (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) ||
		    (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB_RSS) ||
		    (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB)) {
			/* SRIOV only works in VMDq enable mode */
			PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
					" SRIOV active, "
					"wrong VMDQ mq_mode rx %u tx %u\n",
					port_id,
					dev_conf->rxmode.mq_mode,
					dev_conf->txmode.mq_mode);
			return (-EINVAL);
		}

		switch (dev_conf->rxmode.mq_mode) {
		case ETH_MQ_RX_VMDQ_RSS:
		case ETH_MQ_RX_VMDQ_DCB:
		case ETH_MQ_RX_VMDQ_DCB_RSS:
			/* DCB/RSS VMDQ in SRIOV mode, not implement yet */
			PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
					" SRIOV active, "
					"unsupported VMDQ mq_mode rx %u\n",
					port_id, dev_conf->rxmode.mq_mode);
			return (-EINVAL);
		default: /* ETH_MQ_RX_VMDQ_ONLY or ETH_MQ_RX_NONE */
			/* if nothing mq mode configure, use default scheme */
			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
			if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
				RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
			break;
		}

		switch (dev_conf->txmode.mq_mode) {
		case ETH_MQ_TX_VMDQ_DCB:
			/* DCB VMDQ in SRIOV mode, not implement yet */
			PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
					" SRIOV active, "
					"unsupported VMDQ mq_mode tx %u\n",
					port_id, dev_conf->txmode.mq_mode);
			return (-EINVAL);
		default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
			/* if nothing mq mode configure, use default scheme */
			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
			if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
				RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
			break;
		}

		/* check valid queue number */
		if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
		    (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
			PMD_DEBUG_TRACE("ethdev port_id=%d SRIOV active, "
				    "queue number must less equal to %d\n",
					port_id, RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
			return (-EINVAL);
		}
	} else {
		/* For vmdb+dcb mode check our configuration before we go further */
		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
			const struct rte_eth_vmdq_dcb_conf *conf;

			/* VMDQ+DCB RX requires the fixed queue count */
			if (nb_rx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
				PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_rx_q "
						"!= %d\n",
						port_id, ETH_VMDQ_DCB_NUM_QUEUES);
				return (-EINVAL);
			}
			conf = &(dev_conf->rx_adv_conf.vmdq_dcb_conf);
			if (! (conf->nb_queue_pools == ETH_16_POOLS ||
			       conf->nb_queue_pools == ETH_32_POOLS)) {
				PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
						"nb_queue_pools must be %d or %d\n",
						port_id, ETH_16_POOLS, ETH_32_POOLS);
				return (-EINVAL);
			}
		}
		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
			const struct rte_eth_vmdq_dcb_tx_conf *conf;

			/* VMDQ+DCB TX requires the fixed queue count */
			if (nb_tx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
				PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_tx_q "
						"!= %d\n",
						port_id, ETH_VMDQ_DCB_NUM_QUEUES);
				return (-EINVAL);
			}
			conf = &(dev_conf->tx_adv_conf.vmdq_dcb_tx_conf);
			if (! (conf->nb_queue_pools == ETH_16_POOLS ||
			       conf->nb_queue_pools == ETH_32_POOLS)) {
				PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
						"nb_queue_pools != %d or nb_queue_pools "
						"!= %d\n",
						port_id, ETH_16_POOLS, ETH_32_POOLS);
				return (-EINVAL);
			}
		}

		/* For DCB mode check our configuration before we go further */
		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
			const struct rte_eth_dcb_rx_conf *conf;

			/* DCB RX requires the fixed queue count */
			if (nb_rx_q != ETH_DCB_NUM_QUEUES) {
				PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_rx_q "
						"!= %d\n",
						port_id, ETH_DCB_NUM_QUEUES);
				return (-EINVAL);
			}
			conf = &(dev_conf->rx_adv_conf.dcb_rx_conf);
			if (! (conf->nb_tcs == ETH_4_TCS ||
			       conf->nb_tcs == ETH_8_TCS)) {
				PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
						"nb_tcs != %d or nb_tcs "
						"!= %d\n",
						port_id, ETH_4_TCS, ETH_8_TCS);
				return (-EINVAL);
			}
		}

		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
			const struct rte_eth_dcb_tx_conf *conf;

			/* DCB TX requires the fixed queue count */
			if (nb_tx_q != ETH_DCB_NUM_QUEUES) {
				PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_tx_q "
						"!= %d\n",
						port_id, ETH_DCB_NUM_QUEUES);
				return (-EINVAL);
			}
			conf = &(dev_conf->tx_adv_conf.dcb_tx_conf);
			if (! (conf->nb_tcs == ETH_4_TCS ||
			       conf->nb_tcs == ETH_8_TCS)) {
				PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
						"nb_tcs != %d or nb_tcs "
						"!= %d\n",
						port_id, ETH_4_TCS, ETH_8_TCS);
				return (-EINVAL);
			}
		}
	}
	return 0;
}
585
/*
 * Configure an Ethernet port: validate queue counts against device
 * capabilities, copy *dev_conf into the device data, check jumbo-frame
 * limits and multi-queue mode, allocate the RX/TX queue arrays and
 * finally invoke the PMD's dev_configure op.
 *
 * Must be called from the primary process while the port is stopped.
 * On any failure after queue-array setup, previously configured queue
 * arrays are rolled back (resized to zero).
 *
 * @return 0 on success; -E_RTE_SECONDARY, -EINVAL, -EBUSY or a negative
 *         value propagated from queue config / the PMD on failure.
 */
int
rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	int diag;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup*/
	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

	if (port_id >= nb_ports || port_id >= RTE_MAX_ETHPORTS) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-EINVAL);
	}
	dev = &rte_eth_devices[port_id];

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	/* reconfiguring a running port would pull queues out from under it */
	if (dev->data->dev_started) {
		PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n", port_id);
		return (-EBUSY);
	}

	/*
	 * Check that the numbers of RX and TX queues are not greater
	 * than the maximum number of RX and TX queues supported by the
	 * configured device.
	 */
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);
	if (nb_rx_q > dev_info.max_rx_queues) {
		PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
				port_id, nb_rx_q, dev_info.max_rx_queues);
		return (-EINVAL);
	}
	if (nb_rx_q == 0) {
		PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_q == 0\n", port_id);
		return (-EINVAL);
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
				port_id, nb_tx_q, dev_info.max_tx_queues);
		return (-EINVAL);
	}
	if (nb_tx_q == 0) {
		PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_q == 0\n", port_id);
		return (-EINVAL);
	}

	/* Copy the dev_conf parameter into the dev structure */
	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

	/*
	 * If jumbo frames are enabled, check that the maximum RX packet
	 * length is supported by the configured device.
	 */
	if (dev_conf->rxmode.jumbo_frame == 1) {
		if (dev_conf->rxmode.max_rx_pkt_len >
		    dev_info.max_rx_pktlen) {
			PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
				" > max valid value %u\n",
				port_id,
				(unsigned)dev_conf->rxmode.max_rx_pkt_len,
				(unsigned)dev_info.max_rx_pktlen);
			return (-EINVAL);
		}
		else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
			PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
				" < min valid value %u\n",
				port_id,
				(unsigned)dev_conf->rxmode.max_rx_pkt_len,
				(unsigned)ETHER_MIN_LEN);
			return (-EINVAL);
		}
	} else
		/* Use default value */
		dev->data->dev_conf.rxmode.max_rx_pkt_len = ETHER_MAX_LEN;

	/* multipe queue mode checking (may normalize dev->data->dev_conf) */
	diag = rte_eth_dev_check_mq_mode(port_id, nb_rx_q, nb_tx_q, dev_conf);
	if (diag != 0) {
		PMD_DEBUG_TRACE("port%d rte_eth_dev_check_mq_mode = %d\n",
				port_id, diag);
		return diag;
	}

	/*
	 * Setup new number of RX/TX queues and reconfigure device.
	 */
	diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
				port_id, diag);
		return diag;
	}

	diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
				port_id, diag);
		/* roll back the RX queue array allocated above */
		rte_eth_dev_rx_queue_config(dev, 0);
		return diag;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
				port_id, diag);
		/* roll back both queue arrays allocated above */
		rte_eth_dev_rx_queue_config(dev, 0);
		rte_eth_dev_tx_queue_config(dev, 0);
		return diag;
	}

	return 0;
}
705
706 static void
707 rte_eth_dev_config_restore(uint8_t port_id)
708 {
709         struct rte_eth_dev *dev;
710         struct rte_eth_dev_info dev_info;
711         struct ether_addr addr;
712         uint16_t i;
713         uint32_t pool = 0;
714
715         dev = &rte_eth_devices[port_id];
716
717         rte_eth_dev_info_get(port_id, &dev_info);
718
719         if (RTE_ETH_DEV_SRIOV(dev).active)
720                 pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx;
721
722         /* replay MAC address configuration */
723         for (i = 0; i < dev_info.max_mac_addrs; i++) {
724                 addr = dev->data->mac_addrs[i];
725
726                 /* skip zero address */
727                 if (is_zero_ether_addr(&addr))
728                         continue;
729
730                 /* add address to the hardware */
731                 if  (*dev->dev_ops->mac_addr_add)
732                         (*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool);
733                 else {
734                         PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
735                                         port_id);
736                         /* exit the loop but not return an error */
737                         break;
738                 }
739         }
740
741         /* replay promiscuous configuration */
742         if (rte_eth_promiscuous_get(port_id) == 1)
743                 rte_eth_promiscuous_enable(port_id);
744         else if (rte_eth_promiscuous_get(port_id) == 0)
745                 rte_eth_promiscuous_disable(port_id);
746
747         /* replay allmulticast configuration */
748         if (rte_eth_allmulticast_get(port_id) == 1)
749                 rte_eth_allmulticast_enable(port_id);
750         else if (rte_eth_allmulticast_get(port_id) == 0)
751                 rte_eth_allmulticast_disable(port_id);
752 }
753
754 int
755 rte_eth_dev_start(uint8_t port_id)
756 {
757         struct rte_eth_dev *dev;
758         int diag;
759
760         /* This function is only safe when called from the primary process
761          * in a multi-process setup*/
762         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
763
764         if (port_id >= nb_ports) {
765                 PMD_DEBUG_TRACE("Invalid port_id=%" PRIu8 "\n", port_id);
766                 return (-EINVAL);
767         }
768         dev = &rte_eth_devices[port_id];
769
770         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
771
772         if (dev->data->dev_started != 0) {
773                 PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
774                         " already started\n",
775                         port_id);
776                 return (0);
777         }
778
779         diag = (*dev->dev_ops->dev_start)(dev);
780         if (diag == 0)
781                 dev->data->dev_started = 1;
782         else
783                 return diag;
784
785         rte_eth_dev_config_restore(port_id);
786
787         return 0;
788 }
789
790 void
791 rte_eth_dev_stop(uint8_t port_id)
792 {
793         struct rte_eth_dev *dev;
794
795         /* This function is only safe when called from the primary process
796          * in a multi-process setup*/
797         PROC_PRIMARY_OR_RET();
798
799         if (port_id >= nb_ports) {
800                 PMD_DEBUG_TRACE("Invalid port_id=%" PRIu8 "\n", port_id);
801                 return;
802         }
803         dev = &rte_eth_devices[port_id];
804
805         FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
806
807         if (dev->data->dev_started == 0) {
808                 PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
809                         " already stopped\n",
810                         port_id);
811                 return;
812         }
813
814         dev->data->dev_started = 0;
815         (*dev->dev_ops->dev_stop)(dev);
816 }
817
818 int
819 rte_eth_dev_set_link_up(uint8_t port_id)
820 {
821         struct rte_eth_dev *dev;
822
823         /* This function is only safe when called from the primary process
824          * in a multi-process setup*/
825         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
826
827         if (port_id >= nb_ports) {
828                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
829                 return -EINVAL;
830         }
831         dev = &rte_eth_devices[port_id];
832
833         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
834         return (*dev->dev_ops->dev_set_link_up)(dev);
835 }
836
837 int
838 rte_eth_dev_set_link_down(uint8_t port_id)
839 {
840         struct rte_eth_dev *dev;
841
842         /* This function is only safe when called from the primary process
843          * in a multi-process setup*/
844         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
845
846         if (port_id >= nb_ports) {
847                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
848                 return -EINVAL;
849         }
850         dev = &rte_eth_devices[port_id];
851
852         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
853         return (*dev->dev_ops->dev_set_link_down)(dev);
854 }
855
856 void
857 rte_eth_dev_close(uint8_t port_id)
858 {
859         struct rte_eth_dev *dev;
860
861         /* This function is only safe when called from the primary process
862          * in a multi-process setup*/
863         PROC_PRIMARY_OR_RET();
864
865         if (port_id >= nb_ports) {
866                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
867                 return;
868         }
869
870         dev = &rte_eth_devices[port_id];
871
872         FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
873         dev->data->dev_started = 0;
874         (*dev->dev_ops->dev_close)(dev);
875 }
876
/*
 * Allocate and configure one RX queue of an Ethernet device.
 *
 * Primary process only; the port must be stopped.  The mempool 'mp'
 * supplies the RX mbufs: its private area must hold a
 * rte_pktmbuf_pool_private, and the usable room per mbuf (data room
 * minus RTE_PKTMBUF_HEADROOM) must be at least the device's minimum
 * RX buffer size.
 *
 * Returns 0 (or the PMD's result) on success; -EINVAL for a bad port,
 * queue id or too-small mbufs; -EBUSY if the port is started; -ENOSPC
 * if the pool's private data is too small; -ENOTSUP if the PMD lacks
 * the needed hooks.
 */
int
rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
		       uint16_t nb_rx_desc, unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf,
		       struct rte_mempool *mp)
{
	struct rte_eth_dev *dev;
	struct rte_pktmbuf_pool_private *mbp_priv;
	struct rte_eth_dev_info dev_info;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup*/
	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-EINVAL);
	}
	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
		return (-EINVAL);
	}

	/* Queue geometry cannot change while the port is running. */
	if (dev->data->dev_started) {
		PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n", port_id);
		return -EBUSY;
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);

	/*
	 * Check the size of the mbuf data buffer.
	 * This value must be provided in the private data of the memory pool.
	 * First check that the memory pool has a valid private data.
	 */
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);
	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
		PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
				mp->name, (int) mp->private_data_size,
				(int) sizeof(struct rte_pktmbuf_pool_private));
		return (-ENOSPC);
	}
	/* Then verify the per-mbuf room against the device minimum. */
	mbp_priv = rte_mempool_get_priv(mp);
	if ((uint32_t) (mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM) <
	    dev_info.min_rx_bufsize) {
		PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
				"(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
				"=%d)\n",
				mp->name,
				(int)mbp_priv->mbuf_data_room_size,
				(int)(RTE_PKTMBUF_HEADROOM +
				      dev_info.min_rx_bufsize),
				(int)RTE_PKTMBUF_HEADROOM,
				(int)dev_info.min_rx_bufsize);
		return (-EINVAL);
	}

	return (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
					       socket_id, rx_conf, mp);
}
940
941 int
942 rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
943                        uint16_t nb_tx_desc, unsigned int socket_id,
944                        const struct rte_eth_txconf *tx_conf)
945 {
946         struct rte_eth_dev *dev;
947
948         /* This function is only safe when called from the primary process
949          * in a multi-process setup*/
950         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
951
952         if (port_id >= RTE_MAX_ETHPORTS || port_id >= nb_ports) {
953                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
954                 return (-EINVAL);
955         }
956         dev = &rte_eth_devices[port_id];
957         if (tx_queue_id >= dev->data->nb_tx_queues) {
958                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
959                 return (-EINVAL);
960         }
961
962         if (dev->data->dev_started) {
963                 PMD_DEBUG_TRACE(
964                     "port %d must be stopped to allow configuration\n", port_id);
965                 return -EBUSY;
966         }
967
968         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
969         return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
970                                                socket_id, tx_conf);
971 }
972
973 void
974 rte_eth_promiscuous_enable(uint8_t port_id)
975 {
976         struct rte_eth_dev *dev;
977
978         if (port_id >= nb_ports) {
979                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
980                 return;
981         }
982         dev = &rte_eth_devices[port_id];
983
984         FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
985         (*dev->dev_ops->promiscuous_enable)(dev);
986         dev->data->promiscuous = 1;
987 }
988
989 void
990 rte_eth_promiscuous_disable(uint8_t port_id)
991 {
992         struct rte_eth_dev *dev;
993
994         if (port_id >= nb_ports) {
995                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
996                 return;
997         }
998         dev = &rte_eth_devices[port_id];
999
1000         FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
1001         dev->data->promiscuous = 0;
1002         (*dev->dev_ops->promiscuous_disable)(dev);
1003 }
1004
1005 int
1006 rte_eth_promiscuous_get(uint8_t port_id)
1007 {
1008         struct rte_eth_dev *dev;
1009
1010         if (port_id >= nb_ports) {
1011                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1012                 return -1;
1013         }
1014
1015         dev = &rte_eth_devices[port_id];
1016         return dev->data->promiscuous;
1017 }
1018
1019 void
1020 rte_eth_allmulticast_enable(uint8_t port_id)
1021 {
1022         struct rte_eth_dev *dev;
1023
1024         if (port_id >= nb_ports) {
1025                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1026                 return;
1027         }
1028         dev = &rte_eth_devices[port_id];
1029
1030         FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
1031         (*dev->dev_ops->allmulticast_enable)(dev);
1032         dev->data->all_multicast = 1;
1033 }
1034
1035 void
1036 rte_eth_allmulticast_disable(uint8_t port_id)
1037 {
1038         struct rte_eth_dev *dev;
1039
1040         if (port_id >= nb_ports) {
1041                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1042                 return;
1043         }
1044         dev = &rte_eth_devices[port_id];
1045
1046         FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
1047         dev->data->all_multicast = 0;
1048         (*dev->dev_ops->allmulticast_disable)(dev);
1049 }
1050
1051 int
1052 rte_eth_allmulticast_get(uint8_t port_id)
1053 {
1054         struct rte_eth_dev *dev;
1055
1056         if (port_id >= nb_ports) {
1057                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1058                 return -1;
1059         }
1060
1061         dev = &rte_eth_devices[port_id];
1062         return dev->data->all_multicast;
1063 }
1064
/*
 * Copy dev->data->dev_link into *link with a single 64-bit
 * compare-and-set, so an interrupt-context link-status update can never
 * be observed half-written.
 *
 * NOTE(review): the casts type-pun struct rte_eth_link to uint64_t --
 * this is only valid while the struct is exactly 64 bits wide; confirm
 * against the struct definition if it changes.
 *
 * Returns 0 on success, -1 if *dst changed between the expected-value
 * read and the cmpset (caller may retry).
 */
static inline int
rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
1078
1079 void
1080 rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
1081 {
1082         struct rte_eth_dev *dev;
1083
1084         if (port_id >= nb_ports) {
1085                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1086                 return;
1087         }
1088         dev = &rte_eth_devices[port_id];
1089         FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1090
1091         if (dev->data->dev_conf.intr_conf.lsc != 0)
1092                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1093         else {
1094                 (*dev->dev_ops->link_update)(dev, 1);
1095                 *eth_link = dev->data->dev_link;
1096         }
1097 }
1098
1099 void
1100 rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
1101 {
1102         struct rte_eth_dev *dev;
1103
1104         if (port_id >= nb_ports) {
1105                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1106                 return;
1107         }
1108         dev = &rte_eth_devices[port_id];
1109         FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1110
1111         if (dev->data->dev_conf.intr_conf.lsc != 0)
1112                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1113         else {
1114                 (*dev->dev_ops->link_update)(dev, 0);
1115                 *eth_link = dev->data->dev_link;
1116         }
1117 }
1118
1119 void
1120 rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
1121 {
1122         struct rte_eth_dev *dev;
1123
1124         if (port_id >= nb_ports) {
1125                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1126                 return;
1127         }
1128         dev = &rte_eth_devices[port_id];
1129         memset(stats, 0, sizeof(*stats));
1130
1131         FUNC_PTR_OR_RET(*dev->dev_ops->stats_get);
1132         (*dev->dev_ops->stats_get)(dev, stats);
1133         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1134 }
1135
1136 void
1137 rte_eth_stats_reset(uint8_t port_id)
1138 {
1139         struct rte_eth_dev *dev;
1140
1141         if (port_id >= nb_ports) {
1142                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1143                 return;
1144         }
1145         dev = &rte_eth_devices[port_id];
1146
1147         FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
1148         (*dev->dev_ops->stats_reset)(dev);
1149 }
1150
1151
1152 static int
1153 set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
1154                 uint8_t is_rx)
1155 {
1156         struct rte_eth_dev *dev;
1157
1158         if (port_id >= nb_ports) {
1159                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1160                 return -ENODEV;
1161         }
1162         dev = &rte_eth_devices[port_id];
1163
1164         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
1165         return (*dev->dev_ops->queue_stats_mapping_set)
1166                         (dev, queue_id, stat_idx, is_rx);
1167 }
1168
1169
1170 int
1171 rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
1172                 uint8_t stat_idx)
1173 {
1174         return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
1175                         STAT_QMAP_TX);
1176 }
1177
1178
1179 int
1180 rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
1181                 uint8_t stat_idx)
1182 {
1183         return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
1184                         STAT_QMAP_RX);
1185 }
1186
1187
1188 void
1189 rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
1190 {
1191         struct rte_eth_dev *dev;
1192
1193         if (port_id >= nb_ports) {
1194                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1195                 return;
1196         }
1197         dev = &rte_eth_devices[port_id];
1198
1199         /* Default device offload capabilities to zero */
1200         dev_info->rx_offload_capa = 0;
1201         dev_info->tx_offload_capa = 0;
1202         dev_info->if_index = 0;
1203         FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
1204         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
1205         dev_info->pci_dev = dev->pci_dev;
1206         if (dev->driver)
1207                 dev_info->driver_name = dev->driver->pci_drv.name;
1208 }
1209
1210 void
1211 rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
1212 {
1213         struct rte_eth_dev *dev;
1214
1215         if (port_id >= nb_ports) {
1216                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1217                 return;
1218         }
1219         dev = &rte_eth_devices[port_id];
1220         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
1221 }
1222
1223 int
1224 rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
1225 {
1226         struct rte_eth_dev *dev;
1227
1228         if (port_id >= nb_ports) {
1229                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1230                 return (-ENODEV);
1231         }
1232         dev = &rte_eth_devices[port_id];
1233         if (! (dev->data->dev_conf.rxmode.hw_vlan_filter)) {
1234                 PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
1235                 return (-ENOSYS);
1236         }
1237
1238         if (vlan_id > 4095) {
1239                 PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
1240                                 port_id, (unsigned) vlan_id);
1241                 return (-EINVAL);
1242         }
1243         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
1244         (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
1245         return (0);
1246 }
1247
1248 int
1249 rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on)
1250 {
1251         struct rte_eth_dev *dev;
1252
1253         if (port_id >= nb_ports) {
1254                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1255                 return (-ENODEV);
1256         }
1257
1258         dev = &rte_eth_devices[port_id];
1259         if (rx_queue_id >= dev->data->nb_rx_queues) {
1260                 PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", port_id);
1261                 return (-EINVAL);
1262         }
1263
1264         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
1265         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
1266
1267         return (0);
1268 }
1269
1270 int
1271 rte_eth_dev_set_vlan_ether_type(uint8_t port_id, uint16_t tpid)
1272 {
1273         struct rte_eth_dev *dev;
1274
1275         if (port_id >= nb_ports) {
1276                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1277                 return (-ENODEV);
1278         }
1279
1280         dev = &rte_eth_devices[port_id];
1281         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
1282         (*dev->dev_ops->vlan_tpid_set)(dev, tpid);
1283
1284         return (0);
1285 }
1286
1287 int
1288 rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
1289 {
1290         struct rte_eth_dev *dev;
1291         int ret = 0;
1292         int mask = 0;
1293         int cur, org = 0;
1294
1295         if (port_id >= nb_ports) {
1296                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1297                 return (-ENODEV);
1298         }
1299
1300         dev = &rte_eth_devices[port_id];
1301
1302         /*check which option changed by application*/
1303         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
1304         org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
1305         if (cur != org){
1306                 dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
1307                 mask |= ETH_VLAN_STRIP_MASK;
1308         }
1309
1310         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
1311         org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
1312         if (cur != org){
1313                 dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
1314                 mask |= ETH_VLAN_FILTER_MASK;
1315         }
1316
1317         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
1318         org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
1319         if (cur != org){
1320                 dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
1321                 mask |= ETH_VLAN_EXTEND_MASK;
1322         }
1323
1324         /*no change*/
1325         if(mask == 0)
1326                 return ret;
1327
1328         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
1329         (*dev->dev_ops->vlan_offload_set)(dev, mask);
1330
1331         return ret;
1332 }
1333
1334 int
1335 rte_eth_dev_get_vlan_offload(uint8_t port_id)
1336 {
1337         struct rte_eth_dev *dev;
1338         int ret = 0;
1339
1340         if (port_id >= nb_ports) {
1341                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1342                 return (-ENODEV);
1343         }
1344
1345         dev = &rte_eth_devices[port_id];
1346
1347         if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1348                 ret |= ETH_VLAN_STRIP_OFFLOAD ;
1349
1350         if (dev->data->dev_conf.rxmode.hw_vlan_filter)
1351                 ret |= ETH_VLAN_FILTER_OFFLOAD ;
1352
1353         if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1354                 ret |= ETH_VLAN_EXTEND_OFFLOAD ;
1355
1356         return ret;
1357 }
1358
1359
1360 int
1361 rte_eth_dev_fdir_add_signature_filter(uint8_t port_id,
1362                                       struct rte_fdir_filter *fdir_filter,
1363                                       uint8_t queue)
1364 {
1365         struct rte_eth_dev *dev;
1366
1367         if (port_id >= nb_ports) {
1368                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1369                 return (-ENODEV);
1370         }
1371
1372         dev = &rte_eth_devices[port_id];
1373
1374         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
1375                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1376                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1377                 return (-ENOSYS);
1378         }
1379
1380         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1381              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1382             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1383                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1384                                 "None l4type, source & destinations ports " \
1385                                 "should be null!\n");
1386                 return (-EINVAL);
1387         }
1388
1389         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_signature_filter, -ENOTSUP);
1390         return (*dev->dev_ops->fdir_add_signature_filter)(dev, fdir_filter,
1391                                                                 queue);
1392 }
1393
1394 int
1395 rte_eth_dev_fdir_update_signature_filter(uint8_t port_id,
1396                                          struct rte_fdir_filter *fdir_filter,
1397                                          uint8_t queue)
1398 {
1399         struct rte_eth_dev *dev;
1400
1401         if (port_id >= nb_ports) {
1402                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1403                 return (-ENODEV);
1404         }
1405
1406         dev = &rte_eth_devices[port_id];
1407
1408         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
1409                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1410                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1411                 return (-ENOSYS);
1412         }
1413
1414         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1415              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1416             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1417                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1418                                 "None l4type, source & destinations ports " \
1419                                 "should be null!\n");
1420                 return (-EINVAL);
1421         }
1422
1423         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_signature_filter, -ENOTSUP);
1424         return (*dev->dev_ops->fdir_update_signature_filter)(dev, fdir_filter,
1425                                                                 queue);
1426
1427 }
1428
1429 int
1430 rte_eth_dev_fdir_remove_signature_filter(uint8_t port_id,
1431                                          struct rte_fdir_filter *fdir_filter)
1432 {
1433         struct rte_eth_dev *dev;
1434
1435         if (port_id >= nb_ports) {
1436                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1437                 return (-ENODEV);
1438         }
1439
1440         dev = &rte_eth_devices[port_id];
1441
1442         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
1443                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1444                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1445                 return (-ENOSYS);
1446         }
1447
1448         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1449              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1450             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1451                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1452                                 "None l4type source & destinations ports " \
1453                                 "should be null!\n");
1454                 return (-EINVAL);
1455         }
1456
1457         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_signature_filter, -ENOTSUP);
1458         return (*dev->dev_ops->fdir_remove_signature_filter)(dev, fdir_filter);
1459 }
1460
1461 int
1462 rte_eth_dev_fdir_get_infos(uint8_t port_id, struct rte_eth_fdir *fdir)
1463 {
1464         struct rte_eth_dev *dev;
1465
1466         if (port_id >= nb_ports) {
1467                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1468                 return (-ENODEV);
1469         }
1470
1471         dev = &rte_eth_devices[port_id];
1472         if (! (dev->data->dev_conf.fdir_conf.mode)) {
1473                 PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
1474                 return (-ENOSYS);
1475         }
1476
1477         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_infos_get, -ENOTSUP);
1478
1479         (*dev->dev_ops->fdir_infos_get)(dev, fdir);
1480         return (0);
1481 }
1482
1483 int
1484 rte_eth_dev_fdir_add_perfect_filter(uint8_t port_id,
1485                                     struct rte_fdir_filter *fdir_filter,
1486                                     uint16_t soft_id, uint8_t queue,
1487                                     uint8_t drop)
1488 {
1489         struct rte_eth_dev *dev;
1490
1491         if (port_id >= nb_ports) {
1492                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1493                 return (-ENODEV);
1494         }
1495
1496         dev = &rte_eth_devices[port_id];
1497
1498         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
1499                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1500                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1501                 return (-ENOSYS);
1502         }
1503
1504         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1505              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1506             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1507                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1508                                 "None l4type, source & destinations ports " \
1509                                 "should be null!\n");
1510                 return (-EINVAL);
1511         }
1512
1513         /* For now IPv6 is not supported with perfect filter */
1514         if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
1515                 return (-ENOTSUP);
1516
1517         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_perfect_filter, -ENOTSUP);
1518         return (*dev->dev_ops->fdir_add_perfect_filter)(dev, fdir_filter,
1519                                                                 soft_id, queue,
1520                                                                 drop);
1521 }
1522
1523 int
1524 rte_eth_dev_fdir_update_perfect_filter(uint8_t port_id,
1525                                        struct rte_fdir_filter *fdir_filter,
1526                                        uint16_t soft_id, uint8_t queue,
1527                                        uint8_t drop)
1528 {
1529         struct rte_eth_dev *dev;
1530
1531         if (port_id >= nb_ports) {
1532                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1533                 return (-ENODEV);
1534         }
1535
1536         dev = &rte_eth_devices[port_id];
1537
1538         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
1539                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1540                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1541                 return (-ENOSYS);
1542         }
1543
1544         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1545              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1546             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1547                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1548                                 "None l4type, source & destinations ports " \
1549                                 "should be null!\n");
1550                 return (-EINVAL);
1551         }
1552
1553         /* For now IPv6 is not supported with perfect filter */
1554         if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
1555                 return (-ENOTSUP);
1556
1557         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_perfect_filter, -ENOTSUP);
1558         return (*dev->dev_ops->fdir_update_perfect_filter)(dev, fdir_filter,
1559                                                         soft_id, queue, drop);
1560 }
1561
1562 int
1563 rte_eth_dev_fdir_remove_perfect_filter(uint8_t port_id,
1564                                        struct rte_fdir_filter *fdir_filter,
1565                                        uint16_t soft_id)
1566 {
1567         struct rte_eth_dev *dev;
1568
1569         if (port_id >= nb_ports) {
1570                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1571                 return (-ENODEV);
1572         }
1573
1574         dev = &rte_eth_devices[port_id];
1575
1576         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
1577                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1578                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1579                 return (-ENOSYS);
1580         }
1581
1582         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1583              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1584             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1585                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1586                                 "None l4type, source & destinations ports " \
1587                                 "should be null!\n");
1588                 return (-EINVAL);
1589         }
1590
1591         /* For now IPv6 is not supported with perfect filter */
1592         if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
1593                 return (-ENOTSUP);
1594
1595         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_perfect_filter, -ENOTSUP);
1596         return (*dev->dev_ops->fdir_remove_perfect_filter)(dev, fdir_filter,
1597                                                                 soft_id);
1598 }
1599
1600 int
1601 rte_eth_dev_fdir_set_masks(uint8_t port_id, struct rte_fdir_masks *fdir_mask)
1602 {
1603         struct rte_eth_dev *dev;
1604
1605         if (port_id >= nb_ports) {
1606                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1607                 return (-ENODEV);
1608         }
1609
1610         dev = &rte_eth_devices[port_id];
1611         if (! (dev->data->dev_conf.fdir_conf.mode)) {
1612                 PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
1613                 return (-ENOSYS);
1614         }
1615
1616         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_set_masks, -ENOTSUP);
1617         return (*dev->dev_ops->fdir_set_masks)(dev, fdir_mask);
1618 }
1619
1620 int
1621 rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
1622 {
1623         struct rte_eth_dev *dev;
1624
1625         if (port_id >= nb_ports) {
1626                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1627                 return (-ENODEV);
1628         }
1629
1630         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
1631                 PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
1632                 return (-EINVAL);
1633         }
1634
1635         dev = &rte_eth_devices[port_id];
1636         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
1637         return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
1638 }
1639
1640 int
1641 rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc_conf)
1642 {
1643         struct rte_eth_dev *dev;
1644
1645         if (port_id >= nb_ports) {
1646                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1647                 return (-ENODEV);
1648         }
1649
1650         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
1651                 PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
1652                 return (-EINVAL);
1653         }
1654
1655         dev = &rte_eth_devices[port_id];
1656         /* High water, low water validation are device specific */
1657         if  (*dev->dev_ops->priority_flow_ctrl_set)
1658                 return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
1659         return (-ENOTSUP);
1660 }
1661
1662 int
1663 rte_eth_dev_rss_reta_update(uint8_t port_id, struct rte_eth_rss_reta *reta_conf)
1664 {
1665         struct rte_eth_dev *dev;
1666         uint16_t max_rxq;
1667         uint8_t i,j;
1668
1669         if (port_id >= nb_ports) {
1670                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1671                 return (-ENODEV);
1672         }
1673
1674         /* Invalid mask bit(s) setting */
1675         if ((reta_conf->mask_lo == 0) && (reta_conf->mask_hi == 0)) {
1676                 PMD_DEBUG_TRACE("Invalid update mask bits for port=%d\n",port_id);
1677                 return (-EINVAL);
1678         }
1679
1680         dev = &rte_eth_devices[port_id];
1681         max_rxq = (dev->data->nb_rx_queues <= ETH_RSS_RETA_MAX_QUEUE) ?
1682                 dev->data->nb_rx_queues : ETH_RSS_RETA_MAX_QUEUE;
1683         if (reta_conf->mask_lo != 0) {
1684                 for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES/2; i++) {
1685                         if ((reta_conf->mask_lo & (1ULL << i)) &&
1686                                 (reta_conf->reta[i] >= max_rxq)) {
1687                                 PMD_DEBUG_TRACE("RETA hash index output"
1688                                         "configration for port=%d,invalid"
1689                                         "queue=%d\n",port_id,reta_conf->reta[i]);
1690
1691                                 return (-EINVAL);
1692                         }
1693                 }
1694         }
1695
1696         if (reta_conf->mask_hi != 0) {
1697                 for (i = 0; i< ETH_RSS_RETA_NUM_ENTRIES/2; i++) {
1698                         j = (uint8_t)(i + ETH_RSS_RETA_NUM_ENTRIES/2);
1699
1700                         /* Check if the max entry >= 128 */
1701                         if ((reta_conf->mask_hi & (1ULL << i)) &&
1702                                 (reta_conf->reta[j] >= max_rxq)) {
1703                                 PMD_DEBUG_TRACE("RETA hash index output"
1704                                         "configration for port=%d,invalid"
1705                                         "queue=%d\n",port_id,reta_conf->reta[j]);
1706
1707                                 return (-EINVAL);
1708                         }
1709                 }
1710         }
1711
1712         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
1713         return (*dev->dev_ops->reta_update)(dev, reta_conf);
1714 }
1715
1716 int
1717 rte_eth_dev_rss_reta_query(uint8_t port_id, struct rte_eth_rss_reta *reta_conf)
1718 {
1719         struct rte_eth_dev *dev;
1720
1721         if (port_id >= nb_ports) {
1722                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1723                 return (-ENODEV);
1724         }
1725
1726         if((reta_conf->mask_lo == 0) && (reta_conf->mask_hi == 0)) {
1727                 PMD_DEBUG_TRACE("Invalid update mask bits for the port=%d\n",port_id);
1728                 return (-EINVAL);
1729         }
1730
1731         dev = &rte_eth_devices[port_id];
1732         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
1733         return (*dev->dev_ops->reta_query)(dev, reta_conf);
1734 }
1735
1736 int
1737 rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf)
1738 {
1739         struct rte_eth_dev *dev;
1740         uint16_t rss_hash_protos;
1741
1742         if (port_id >= nb_ports) {
1743                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1744                 return (-ENODEV);
1745         }
1746         rss_hash_protos = rss_conf->rss_hf;
1747         if ((rss_hash_protos != 0) &&
1748             ((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) {
1749                 PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%x\n",
1750                                 rss_hash_protos);
1751                 return (-EINVAL);
1752         }
1753         dev = &rte_eth_devices[port_id];
1754         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
1755         return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
1756 }
1757
1758 int
1759 rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
1760                               struct rte_eth_rss_conf *rss_conf)
1761 {
1762         struct rte_eth_dev *dev;
1763
1764         if (port_id >= nb_ports) {
1765                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1766                 return (-ENODEV);
1767         }
1768         dev = &rte_eth_devices[port_id];
1769         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
1770         return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
1771 }
1772
1773 int
1774 rte_eth_led_on(uint8_t port_id)
1775 {
1776         struct rte_eth_dev *dev;
1777
1778         if (port_id >= nb_ports) {
1779                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1780                 return (-ENODEV);
1781         }
1782
1783         dev = &rte_eth_devices[port_id];
1784         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
1785         return ((*dev->dev_ops->dev_led_on)(dev));
1786 }
1787
1788 int
1789 rte_eth_led_off(uint8_t port_id)
1790 {
1791         struct rte_eth_dev *dev;
1792
1793         if (port_id >= nb_ports) {
1794                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1795                 return (-ENODEV);
1796         }
1797
1798         dev = &rte_eth_devices[port_id];
1799         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
1800         return ((*dev->dev_ops->dev_led_off)(dev));
1801 }
1802
1803 /*
1804  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
1805  * an empty spot.
1806  */
1807 static inline int
1808 get_mac_addr_index(uint8_t port_id, struct ether_addr *addr)
1809 {
1810         struct rte_eth_dev_info dev_info;
1811         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1812         unsigned i;
1813
1814         rte_eth_dev_info_get(port_id, &dev_info);
1815
1816         for (i = 0; i < dev_info.max_mac_addrs; i++)
1817                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
1818                         return i;
1819
1820         return -1;
1821 }
1822
1823 static struct ether_addr null_mac_addr = {{0, 0, 0, 0, 0, 0}};
1824
1825 int
1826 rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
1827                         uint32_t pool)
1828 {
1829         struct rte_eth_dev *dev;
1830         int index;
1831         uint64_t pool_mask;
1832
1833         if (port_id >= nb_ports) {
1834                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1835                 return (-ENODEV);
1836         }
1837         dev = &rte_eth_devices[port_id];
1838         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
1839
1840         if (is_zero_ether_addr(addr)) {
1841                 PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
1842                         port_id);
1843                 return (-EINVAL);
1844         }
1845         if (pool >= ETH_64_POOLS) {
1846                 PMD_DEBUG_TRACE("pool id must be 0-%d\n",ETH_64_POOLS - 1);
1847                 return (-EINVAL);
1848         }
1849
1850         index = get_mac_addr_index(port_id, addr);
1851         if (index < 0) {
1852                 index = get_mac_addr_index(port_id, &null_mac_addr);
1853                 if (index < 0) {
1854                         PMD_DEBUG_TRACE("port %d: MAC address array full\n",
1855                                 port_id);
1856                         return (-ENOSPC);
1857                 }
1858         } else {
1859                 pool_mask = dev->data->mac_pool_sel[index];
1860
1861                 /* Check if both MAC address and pool is alread there, and do nothing */
1862                 if (pool_mask & (1ULL << pool))
1863                         return 0;
1864         }
1865
1866         /* Update NIC */
1867         (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
1868
1869         /* Update address in NIC data structure */
1870         ether_addr_copy(addr, &dev->data->mac_addrs[index]);
1871
1872         /* Update pool bitmap in NIC data structure */
1873         dev->data->mac_pool_sel[index] |= (1ULL << pool);
1874
1875         return 0;
1876 }
1877
1878 int
1879 rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
1880 {
1881         struct rte_eth_dev *dev;
1882         int index;
1883
1884         if (port_id >= nb_ports) {
1885                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1886                 return (-ENODEV);
1887         }
1888         dev = &rte_eth_devices[port_id];
1889         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
1890
1891         index = get_mac_addr_index(port_id, addr);
1892         if (index == 0) {
1893                 PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
1894                 return (-EADDRINUSE);
1895         } else if (index < 0)
1896                 return 0;  /* Do nothing if address wasn't found */
1897
1898         /* Update NIC */
1899         (*dev->dev_ops->mac_addr_remove)(dev, index);
1900
1901         /* Update address in NIC data structure */
1902         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
1903
1904         return 0;
1905 }
1906
1907 int
1908 rte_eth_dev_set_vf_rxmode(uint8_t port_id,  uint16_t vf,
1909                                 uint16_t rx_mode, uint8_t on)
1910 {
1911         uint16_t num_vfs;
1912         struct rte_eth_dev *dev;
1913         struct rte_eth_dev_info dev_info;
1914
1915         if (port_id >= nb_ports) {
1916                 PMD_DEBUG_TRACE("set VF RX mode:Invalid port_id=%d\n",
1917                                 port_id);
1918                 return (-ENODEV);
1919         }
1920
1921         dev = &rte_eth_devices[port_id];
1922         rte_eth_dev_info_get(port_id, &dev_info);
1923
1924         num_vfs = dev_info.max_vfs;
1925         if (vf > num_vfs)
1926         {
1927                 PMD_DEBUG_TRACE("set VF RX mode:invalid VF id %d\n", vf);
1928                 return (-EINVAL);
1929         }
1930         if (rx_mode == 0)
1931         {
1932                 PMD_DEBUG_TRACE("set VF RX mode:mode mask ca not be zero\n");
1933                 return (-EINVAL);
1934         }
1935         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP);
1936         return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on);
1937 }
1938
1939 /*
1940  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
1941  * an empty spot.
1942  */
1943 static inline int
1944 get_hash_mac_addr_index(uint8_t port_id, struct ether_addr *addr)
1945 {
1946         struct rte_eth_dev_info dev_info;
1947         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1948         unsigned i;
1949
1950         rte_eth_dev_info_get(port_id, &dev_info);
1951         if (!dev->data->hash_mac_addrs)
1952                 return -1;
1953
1954         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
1955                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
1956                         ETHER_ADDR_LEN) == 0)
1957                         return i;
1958
1959         return -1;
1960 }
1961
1962 int
1963 rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
1964                                 uint8_t on)
1965 {
1966         int index;
1967         int ret;
1968         struct rte_eth_dev *dev;
1969
1970         if (port_id >= nb_ports) {
1971                 PMD_DEBUG_TRACE("unicast hash setting:Invalid port_id=%d\n",
1972                         port_id);
1973                 return (-ENODEV);
1974         }
1975
1976         dev = &rte_eth_devices[port_id];
1977         if (is_zero_ether_addr(addr)) {
1978                 PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
1979                         port_id);
1980                 return (-EINVAL);
1981         }
1982
1983         index = get_hash_mac_addr_index(port_id, addr);
1984         /* Check if it's already there, and do nothing */
1985         if ((index >= 0) && (on))
1986                 return 0;
1987
1988         if (index < 0) {
1989                 if (!on) {
1990                         PMD_DEBUG_TRACE("port %d: the MAC address was not"
1991                                 "set in UTA\n", port_id);
1992                         return (-EINVAL);
1993                 }
1994
1995                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
1996                 if (index < 0) {
1997                         PMD_DEBUG_TRACE("port %d: MAC address array full\n",
1998                                         port_id);
1999                         return (-ENOSPC);
2000                 }
2001         }
2002
2003         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
2004         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
2005         if (ret == 0) {
2006                 /* Update address in NIC data structure */
2007                 if (on)
2008                         ether_addr_copy(addr,
2009                                         &dev->data->hash_mac_addrs[index]);
2010                 else
2011                         ether_addr_copy(&null_mac_addr,
2012                                         &dev->data->hash_mac_addrs[index]);
2013         }
2014
2015         return ret;
2016 }
2017
2018 int
2019 rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
2020 {
2021         struct rte_eth_dev *dev;
2022
2023         if (port_id >= nb_ports) {
2024                 PMD_DEBUG_TRACE("unicast hash setting:Invalid port_id=%d\n",
2025                         port_id);
2026                 return (-ENODEV);
2027         }
2028
2029         dev = &rte_eth_devices[port_id];
2030
2031         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
2032         return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
2033 }
2034
2035 int
2036 rte_eth_dev_set_vf_rx(uint8_t port_id,uint16_t vf, uint8_t on)
2037 {
2038         uint16_t num_vfs;
2039         struct rte_eth_dev *dev;
2040         struct rte_eth_dev_info dev_info;
2041
2042         if (port_id >= nb_ports) {
2043                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2044                 return (-ENODEV);
2045         }
2046
2047         dev = &rte_eth_devices[port_id];
2048         rte_eth_dev_info_get(port_id, &dev_info);
2049
2050         num_vfs = dev_info.max_vfs;
2051         if (vf > num_vfs)
2052         {
2053                 PMD_DEBUG_TRACE("port %d: invalid vf id\n", port_id);
2054                 return (-EINVAL);
2055         }
2056
2057         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP);
2058         return (*dev->dev_ops->set_vf_rx)(dev, vf,on);
2059 }
2060
2061 int
2062 rte_eth_dev_set_vf_tx(uint8_t port_id,uint16_t vf, uint8_t on)
2063 {
2064         uint16_t num_vfs;
2065         struct rte_eth_dev *dev;
2066         struct rte_eth_dev_info dev_info;
2067
2068         if (port_id >= nb_ports) {
2069                 PMD_DEBUG_TRACE("set pool tx:Invalid port_id=%d\n", port_id);
2070                 return (-ENODEV);
2071         }
2072
2073         dev = &rte_eth_devices[port_id];
2074         rte_eth_dev_info_get(port_id, &dev_info);
2075
2076         num_vfs = dev_info.max_vfs;
2077         if (vf > num_vfs)
2078         {
2079                 PMD_DEBUG_TRACE("set pool tx:invalid pool id=%d\n", vf);
2080                 return (-EINVAL);
2081         }
2082
2083         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP);
2084         return (*dev->dev_ops->set_vf_tx)(dev, vf,on);
2085 }
2086
2087 int
2088 rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id,
2089                                  uint64_t vf_mask,uint8_t vlan_on)
2090 {
2091         struct rte_eth_dev *dev;
2092
2093         if (port_id >= nb_ports) {
2094                 PMD_DEBUG_TRACE("VF VLAN filter:invalid port id=%d\n",
2095                                 port_id);
2096                 return (-ENODEV);
2097         }
2098         dev = &rte_eth_devices[port_id];
2099
2100         if(vlan_id > ETHER_MAX_VLAN_ID)
2101         {
2102                 PMD_DEBUG_TRACE("VF VLAN filter:invalid VLAN id=%d\n",
2103                         vlan_id);
2104                 return (-EINVAL);
2105         }
2106         if (vf_mask == 0)
2107         {
2108                 PMD_DEBUG_TRACE("VF VLAN filter:pool_mask can not be 0\n");
2109                 return (-EINVAL);
2110         }
2111
2112         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP);
2113         return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id,
2114                                                 vf_mask,vlan_on);
2115 }
2116
2117 int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
2118                                         uint16_t tx_rate)
2119 {
2120         struct rte_eth_dev *dev;
2121         struct rte_eth_dev_info dev_info;
2122         struct rte_eth_link link;
2123
2124         if (port_id >= nb_ports) {
2125                 PMD_DEBUG_TRACE("set queue rate limit:invalid port id=%d\n",
2126                                 port_id);
2127                 return -ENODEV;
2128         }
2129
2130         dev = &rte_eth_devices[port_id];
2131         rte_eth_dev_info_get(port_id, &dev_info);
2132         link = dev->data->dev_link;
2133
2134         if (queue_idx > dev_info.max_tx_queues) {
2135                 PMD_DEBUG_TRACE("set queue rate limit:port %d: "
2136                                 "invalid queue id=%d\n", port_id, queue_idx);
2137                 return -EINVAL;
2138         }
2139
2140         if (tx_rate > link.link_speed) {
2141                 PMD_DEBUG_TRACE("set queue rate limit:invalid tx_rate=%d, "
2142                                 "bigger than link speed= %d\n",
2143                         tx_rate, link_speed);
2144                 return -EINVAL;
2145         }
2146
2147         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
2148         return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate);
2149 }
2150
2151 int rte_eth_set_vf_rate_limit(uint8_t port_id, uint16_t vf, uint16_t tx_rate,
2152                                 uint64_t q_msk)
2153 {
2154         struct rte_eth_dev *dev;
2155         struct rte_eth_dev_info dev_info;
2156         struct rte_eth_link link;
2157
2158         if (q_msk == 0)
2159                 return 0;
2160
2161         if (port_id >= nb_ports) {
2162                 PMD_DEBUG_TRACE("set VF rate limit:invalid port id=%d\n",
2163                                 port_id);
2164                 return -ENODEV;
2165         }
2166
2167         dev = &rte_eth_devices[port_id];
2168         rte_eth_dev_info_get(port_id, &dev_info);
2169         link = dev->data->dev_link;
2170
2171         if (vf > dev_info.max_vfs) {
2172                 PMD_DEBUG_TRACE("set VF rate limit:port %d: "
2173                                 "invalid vf id=%d\n", port_id, vf);
2174                 return -EINVAL;
2175         }
2176
2177         if (tx_rate > link.link_speed) {
2178                 PMD_DEBUG_TRACE("set VF rate limit:invalid tx_rate=%d, "
2179                                 "bigger than link speed= %d\n",
2180                                 tx_rate, link_speed);
2181                 return -EINVAL;
2182         }
2183
2184         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rate_limit, -ENOTSUP);
2185         return (*dev->dev_ops->set_vf_rate_limit)(dev, vf, tx_rate, q_msk);
2186 }
2187
2188 int
2189 rte_eth_mirror_rule_set(uint8_t port_id,
2190                         struct rte_eth_vmdq_mirror_conf *mirror_conf,
2191                         uint8_t rule_id, uint8_t on)
2192 {
2193         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2194
2195         if (port_id >= nb_ports) {
2196                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2197                 return (-ENODEV);
2198         }
2199
2200         if (mirror_conf->rule_type_mask == 0) {
2201                 PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
2202                 return (-EINVAL);
2203         }
2204
2205         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
2206                 PMD_DEBUG_TRACE("Invalid dst pool, pool id must"
2207                         "be 0-%d\n",ETH_64_POOLS - 1);
2208                 return (-EINVAL);
2209         }
2210
2211         if ((mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) &&
2212                 (mirror_conf->pool_mask == 0)) {
2213                 PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not"
2214                                 "be 0.\n");
2215                 return (-EINVAL);
2216         }
2217
2218         if(rule_id >= ETH_VMDQ_NUM_MIRROR_RULE)
2219         {
2220                 PMD_DEBUG_TRACE("Invalid rule_id, rule_id must be 0-%d\n",
2221                         ETH_VMDQ_NUM_MIRROR_RULE - 1);
2222                 return (-EINVAL);
2223         }
2224
2225         dev = &rte_eth_devices[port_id];
2226         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
2227
2228         return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
2229 }
2230
2231 int
2232 rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
2233 {
2234         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2235
2236         if (port_id >= nb_ports) {
2237                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2238                 return (-ENODEV);
2239         }
2240
2241         if(rule_id >= ETH_VMDQ_NUM_MIRROR_RULE)
2242         {
2243                 PMD_DEBUG_TRACE("Invalid rule_id, rule_id must be 0-%d\n",
2244                         ETH_VMDQ_NUM_MIRROR_RULE-1);
2245                 return (-EINVAL);
2246         }
2247
2248         dev = &rte_eth_devices[port_id];
2249         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
2250
2251         return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
2252 }
2253
2254 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
/*
 * Debug-build implementation of rte_eth_rx_burst(): validates the port
 * and queue ids before dispatching to the PMD receive handler.
 * Returns the number of packets retrieved, or 0 on a bad port/queue id.
 */
uint16_t
rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
		 struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct rte_eth_dev *dev;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return 0;
	}
	dev = &rte_eth_devices[port_id];
	/* NOTE(review): -ENOTSUP wraps to a large value in this uint16_t
	 * function, so callers cannot distinguish it from a packet
	 * count -- confirm intended. */
	FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, -ENOTSUP);
	if (queue_id >= dev->data->nb_rx_queues) {
		PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
		return 0;
	}
	return (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
						rx_pkts, nb_pkts);
}
2274
/*
 * Debug-build implementation of rte_eth_tx_burst(): validates the port
 * and queue ids before dispatching to the PMD transmit handler.
 * Returns the number of packets actually sent, or 0 on a bad
 * port/queue id.
 */
uint16_t
rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
		 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct rte_eth_dev *dev;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return 0;
	}
	dev = &rte_eth_devices[port_id];

	/* NOTE(review): -ENOTSUP wraps to a large value in this uint16_t
	 * function, so callers cannot distinguish it from a packet
	 * count -- confirm intended. */
	FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, -ENOTSUP);
	if (queue_id >= dev->data->nb_tx_queues) {
		PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
		return 0;
	}
	return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id],
						tx_pkts, nb_pkts);
}
2295
/*
 * Debug-build implementation of rte_eth_rx_queue_count(): returns the
 * PMD's count of used descriptors on an RX queue, or 0 for a bad
 * port id.
 * NOTE(review): -ENOTSUP wraps in this uint32_t return, and queue_id
 * is passed through unchecked (the PMD must validate it) -- confirm
 * intended.
 */
uint32_t
rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
{
	struct rte_eth_dev *dev;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return 0;
	}
	dev = &rte_eth_devices[port_id];
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, -ENOTSUP);
	return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
}
2309
2310 int
2311 rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
2312 {
2313         struct rte_eth_dev *dev;
2314
2315         if (port_id >= nb_ports) {
2316                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2317                 return (-ENODEV);
2318         }
2319         dev = &rte_eth_devices[port_id];
2320         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
2321         return (*dev->dev_ops->rx_descriptor_done)( \
2322                 dev->data->rx_queues[queue_id], offset);
2323 }
2324 #endif
2325
/*
 * Register a user callback for an interrupt event on a port.
 * An identical (cb_fn, cb_arg, event) registration is reused;
 * otherwise a new entry is allocated and appended to the device's
 * callback list under the global callback spinlock.
 * Returns 0 on success, -EINVAL for a NULL callback or bad port id,
 * -ENOMEM when allocating a new entry fails.
 */
int
rte_eth_dev_callback_register(uint8_t port_id,
			enum rte_eth_event_type event,
			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_callback *user_cb;

	if (!cb_fn)
		return (-EINVAL);
	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-EINVAL);
	}

	dev = &rte_eth_devices[port_id];
	rte_spinlock_lock(&rte_eth_dev_cb_lock);

	/* Reuse an existing registration of the same callback, if any;
	 * user_cb is NULL after the loop when there is none. */
	TAILQ_FOREACH(user_cb, &(dev->callbacks), next) {
		if (user_cb->cb_fn == cb_fn &&
			user_cb->cb_arg == cb_arg &&
			user_cb->event == event) {
			break;
		}
	}

	/* create a new callback. */
	if (user_cb == NULL && (user_cb = rte_zmalloc("INTR_USER_CALLBACK",
			sizeof(struct rte_eth_dev_callback), 0)) != NULL) {
		user_cb->cb_fn = cb_fn;
		user_cb->cb_arg = cb_arg;
		user_cb->event = event;
		TAILQ_INSERT_TAIL(&(dev->callbacks), user_cb, next);
	}

	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
	/* user_cb is NULL only when no match was found and the
	 * allocation above failed. */
	return ((user_cb == NULL) ? -ENOMEM : 0);
}
2364
/*
 * Unregister user callback(s) for an interrupt event on a port.
 * A cb_arg of (void *)-1 acts as a wildcard matching any argument.
 * Entries currently executing (active) are left in place and reported
 * via -EAGAIN so the caller can retry.
 * Returns 0 on success, -EINVAL for a NULL callback or bad port id,
 * -EAGAIN when at least one matching callback was busy.
 */
int
rte_eth_dev_callback_unregister(uint8_t port_id,
			enum rte_eth_event_type event,
			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_callback *cb, *next;

	if (!cb_fn)
		return (-EINVAL);
	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-EINVAL);
	}

	dev = &rte_eth_devices[port_id];
	rte_spinlock_lock(&rte_eth_dev_cb_lock);

	ret = 0;
	/* Safe-walk the list: 'next' is captured before 'cb' may be
	 * removed and freed. */
	for (cb = TAILQ_FIRST(&dev->callbacks); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		/* Skip non-matching entries; (void *)-1 matches any arg. */
		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb->cb_arg != (void *)-1 &&
				cb->cb_arg != cb_arg))
			continue;

		/*
		 * if this callback is not executing right now,
		 * then remove it.
		 */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->callbacks), cb, next);
			rte_free(cb);
		} else {
			ret = -EAGAIN;
		}
	}

	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
	return (ret);
}
2409
/*
 * Invoke all user callbacks registered for @event on @dev.
 * The global callback lock is dropped around each user callback (it
 * may block or re-enter the ethdev API); a local copy of the entry is
 * invoked and the 'active' flag keeps unregister from freeing the
 * entry while its callback runs.
 */
void
_rte_eth_dev_callback_process(struct rte_eth_dev *dev,
	enum rte_eth_event_type event)
{
	struct rte_eth_dev_callback *cb_lst;
	struct rte_eth_dev_callback dev_cb;

	rte_spinlock_lock(&rte_eth_dev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->callbacks), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		/* Copy the entry so it can be used after the lock is
		 * released; mark it busy for the duration of the call. */
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		rte_spinlock_unlock(&rte_eth_dev_cb_lock);
		dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
						dev_cb.cb_arg);
		rte_spinlock_lock(&rte_eth_dev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
}
2431 #ifdef RTE_NIC_BYPASS
2432 int rte_eth_dev_bypass_init(uint8_t port_id)
2433 {
2434         struct rte_eth_dev *dev;
2435
2436         if (port_id >= nb_ports) {
2437                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2438                 return (-ENODEV);
2439         }
2440
2441         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2442                 PMD_DEBUG_TRACE("Invalid port device\n");
2443                 return (-ENODEV);
2444         }
2445
2446         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP);
2447         (*dev->dev_ops->bypass_init)(dev);
2448         return 0;
2449 }
2450
2451 int
2452 rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state)
2453 {
2454         struct rte_eth_dev *dev;
2455
2456         if (port_id >= nb_ports) {
2457                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2458                 return (-ENODEV);
2459         }
2460
2461         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2462                 PMD_DEBUG_TRACE("Invalid port device\n");
2463                 return (-ENODEV);
2464         }
2465         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
2466         (*dev->dev_ops->bypass_state_show)(dev, state);
2467         return 0;
2468 }
2469
2470 int
2471 rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state)
2472 {
2473         struct rte_eth_dev *dev;
2474
2475         if (port_id >= nb_ports) {
2476                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2477                 return (-ENODEV);
2478         }
2479
2480         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2481                 PMD_DEBUG_TRACE("Invalid port device\n");
2482                 return (-ENODEV);
2483         }
2484
2485         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP);
2486         (*dev->dev_ops->bypass_state_set)(dev, new_state);
2487         return 0;
2488 }
2489
2490 int
2491 rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state)
2492 {
2493         struct rte_eth_dev *dev;
2494
2495         if (port_id >= nb_ports) {
2496                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2497                 return (-ENODEV);
2498         }
2499
2500         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2501                 PMD_DEBUG_TRACE("Invalid port device\n");
2502                 return (-ENODEV);
2503         }
2504
2505         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
2506         (*dev->dev_ops->bypass_event_show)(dev, event, state);
2507         return 0;
2508 }
2509
2510 int
2511 rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state)
2512 {
2513         struct rte_eth_dev *dev;
2514
2515         if (port_id >= nb_ports) {
2516                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2517                 return (-ENODEV);
2518         }
2519
2520         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2521                 PMD_DEBUG_TRACE("Invalid port device\n");
2522                 return (-ENODEV);
2523         }
2524
2525         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP);
2526         (*dev->dev_ops->bypass_event_set)(dev, event, state);
2527         return 0;
2528 }
2529
2530 int
2531 rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout)
2532 {
2533         struct rte_eth_dev *dev;
2534
2535         if (port_id >= nb_ports) {
2536                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2537                 return (-ENODEV);
2538         }
2539
2540         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2541                 PMD_DEBUG_TRACE("Invalid port device\n");
2542                 return (-ENODEV);
2543         }
2544
2545         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP);
2546         (*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout);
2547         return 0;
2548 }
2549
2550 int
2551 rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver)
2552 {
2553         struct rte_eth_dev *dev;
2554
2555         if (port_id >= nb_ports) {
2556                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2557                 return (-ENODEV);
2558         }
2559
2560         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2561                 PMD_DEBUG_TRACE("Invalid port device\n");
2562                 return (-ENODEV);
2563         }
2564
2565         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP);
2566         (*dev->dev_ops->bypass_ver_show)(dev, ver);
2567         return 0;
2568 }
2569
2570 int
2571 rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
2572 {
2573         struct rte_eth_dev *dev;
2574
2575         if (port_id >= nb_ports) {
2576                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2577                 return (-ENODEV);
2578         }
2579
2580         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2581                 PMD_DEBUG_TRACE("Invalid port device\n");
2582                 return (-ENODEV);
2583         }
2584
2585         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP);
2586         (*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout);
2587         return 0;
2588 }
2589
2590 int
2591 rte_eth_dev_bypass_wd_reset(uint8_t port_id)
2592 {
2593         struct rte_eth_dev *dev;
2594
2595         if (port_id >= nb_ports) {
2596                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2597                 return (-ENODEV);
2598         }
2599
2600         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2601                 PMD_DEBUG_TRACE("Invalid port device\n");
2602                 return (-ENODEV);
2603         }
2604
2605         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP);
2606         (*dev->dev_ops->bypass_wd_reset)(dev);
2607         return 0;
2608 }
2609 #endif
2610
2611 int
2612 rte_eth_dev_add_syn_filter(uint8_t port_id,
2613                         struct rte_syn_filter *filter, uint16_t rx_queue)
2614 {
2615         struct rte_eth_dev *dev;
2616
2617         if (port_id >= nb_ports) {
2618                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2619                 return -ENODEV;
2620         }
2621
2622         dev = &rte_eth_devices[port_id];
2623         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->add_syn_filter, -ENOTSUP);
2624         return (*dev->dev_ops->add_syn_filter)(dev, filter, rx_queue);
2625 }
2626
2627 int
2628 rte_eth_dev_remove_syn_filter(uint8_t port_id)
2629 {
2630         struct rte_eth_dev *dev;
2631
2632         if (port_id >= nb_ports) {
2633                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2634                 return -ENODEV;
2635         }
2636
2637         dev = &rte_eth_devices[port_id];
2638         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->remove_syn_filter, -ENOTSUP);
2639         return (*dev->dev_ops->remove_syn_filter)(dev);
2640 }
2641
2642 int
2643 rte_eth_dev_get_syn_filter(uint8_t port_id,
2644                         struct rte_syn_filter *filter, uint16_t *rx_queue)
2645 {
2646         struct rte_eth_dev *dev;
2647
2648         if (filter == NULL || rx_queue == NULL)
2649                 return -EINVAL;
2650
2651         if (port_id >= nb_ports) {
2652                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2653                 return -ENODEV;
2654         }
2655
2656         dev = &rte_eth_devices[port_id];
2657         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_syn_filter, -ENOTSUP);
2658         return (*dev->dev_ops->get_syn_filter)(dev, filter, rx_queue);
2659 }
2660
2661 int
2662 rte_eth_dev_add_ethertype_filter(uint8_t port_id, uint16_t index,
2663                         struct rte_ethertype_filter *filter, uint16_t rx_queue)
2664 {
2665         struct rte_eth_dev *dev;
2666
2667         if (port_id >= nb_ports) {
2668                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2669                 return -ENODEV;
2670         }
2671         if (filter->ethertype == ETHER_TYPE_IPv4 ||
2672                 filter->ethertype == ETHER_TYPE_IPv6){
2673                 PMD_DEBUG_TRACE("IP and IPv6 are not supported"
2674                         " in ethertype filter\n");
2675                 return -EINVAL;
2676         }
2677         dev = &rte_eth_devices[port_id];
2678         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->add_ethertype_filter, -ENOTSUP);
2679         return (*dev->dev_ops->add_ethertype_filter)(dev, index,
2680                                         filter, rx_queue);
2681 }
2682
2683 int
2684 rte_eth_dev_remove_ethertype_filter(uint8_t port_id,  uint16_t index)
2685 {
2686         struct rte_eth_dev *dev;
2687
2688         if (port_id >= nb_ports) {
2689                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2690                 return -ENODEV;
2691         }
2692
2693         dev = &rte_eth_devices[port_id];
2694         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->remove_ethertype_filter, -ENOTSUP);
2695         return (*dev->dev_ops->remove_ethertype_filter)(dev, index);
2696 }
2697
2698 int
2699 rte_eth_dev_get_ethertype_filter(uint8_t port_id, uint16_t index,
2700                         struct rte_ethertype_filter *filter, uint16_t *rx_queue)
2701 {
2702         struct rte_eth_dev *dev;
2703
2704         if (filter == NULL || rx_queue == NULL)
2705                 return -EINVAL;
2706
2707         if (port_id >= nb_ports) {
2708                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2709                 return -ENODEV;
2710         }
2711
2712         dev = &rte_eth_devices[port_id];
2713         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_ethertype_filter, -ENOTSUP);
2714         return (*dev->dev_ops->get_ethertype_filter)(dev, index,
2715                                                 filter, rx_queue);
2716 }
2717
2718 int
2719 rte_eth_dev_add_2tuple_filter(uint8_t port_id, uint16_t index,
2720                         struct rte_2tuple_filter *filter, uint16_t rx_queue)
2721 {
2722         struct rte_eth_dev *dev;
2723
2724         if (port_id >= nb_ports) {
2725                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2726                 return -ENODEV;
2727         }
2728         if (filter->protocol != IPPROTO_TCP &&
2729                 filter->tcp_flags != 0){
2730                 PMD_DEBUG_TRACE("tcp flags is 0x%x, but the protocol value"
2731                         " is not TCP\n",
2732                         filter->tcp_flags);
2733                 return -EINVAL;
2734         }
2735
2736         dev = &rte_eth_devices[port_id];
2737         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->add_2tuple_filter, -ENOTSUP);
2738         return (*dev->dev_ops->add_2tuple_filter)(dev, index, filter, rx_queue);
2739 }
2740
2741 int
2742 rte_eth_dev_remove_2tuple_filter(uint8_t port_id, uint16_t index)
2743 {
2744         struct rte_eth_dev *dev;
2745
2746         if (port_id >= nb_ports) {
2747                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2748                 return -ENODEV;
2749         }
2750
2751         dev = &rte_eth_devices[port_id];
2752         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->remove_2tuple_filter, -ENOTSUP);
2753         return (*dev->dev_ops->remove_2tuple_filter)(dev, index);
2754 }
2755
2756 int
2757 rte_eth_dev_get_2tuple_filter(uint8_t port_id, uint16_t index,
2758                         struct rte_2tuple_filter *filter, uint16_t *rx_queue)
2759 {
2760         struct rte_eth_dev *dev;
2761
2762         if (filter == NULL || rx_queue == NULL)
2763                 return -EINVAL;
2764
2765         if (port_id >= nb_ports) {
2766                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2767                 return -ENODEV;
2768         }
2769
2770         dev = &rte_eth_devices[port_id];
2771         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_2tuple_filter, -ENOTSUP);
2772         return (*dev->dev_ops->get_2tuple_filter)(dev, index, filter, rx_queue);
2773 }
2774
2775 int
2776 rte_eth_dev_add_5tuple_filter(uint8_t port_id, uint16_t index,
2777                         struct rte_5tuple_filter *filter, uint16_t rx_queue)
2778 {
2779         struct rte_eth_dev *dev;
2780
2781         if (port_id >= nb_ports) {
2782                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2783                 return -ENODEV;
2784         }
2785
2786         if (filter->protocol != IPPROTO_TCP &&
2787                 filter->tcp_flags != 0){
2788                 PMD_DEBUG_TRACE("tcp flags is 0x%x, but the protocol value"
2789                         " is not TCP\n",
2790                         filter->tcp_flags);
2791                 return -EINVAL;
2792         }
2793
2794         dev = &rte_eth_devices[port_id];
2795         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->add_5tuple_filter, -ENOTSUP);
2796         return (*dev->dev_ops->add_5tuple_filter)(dev, index, filter, rx_queue);
2797 }
2798
2799 int
2800 rte_eth_dev_remove_5tuple_filter(uint8_t port_id, uint16_t index)
2801 {
2802         struct rte_eth_dev *dev;
2803
2804         if (port_id >= nb_ports) {
2805                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2806                 return -ENODEV;
2807         }
2808
2809         dev = &rte_eth_devices[port_id];
2810         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->remove_5tuple_filter, -ENOTSUP);
2811         return (*dev->dev_ops->remove_5tuple_filter)(dev, index);
2812 }
2813
2814 int
2815 rte_eth_dev_get_5tuple_filter(uint8_t port_id, uint16_t index,
2816                         struct rte_5tuple_filter *filter, uint16_t *rx_queue)
2817 {
2818         struct rte_eth_dev *dev;
2819
2820         if (filter == NULL || rx_queue == NULL)
2821                 return -EINVAL;
2822
2823         if (port_id >= nb_ports) {
2824                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2825                 return -ENODEV;
2826         }
2827
2828         dev = &rte_eth_devices[port_id];
2829         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_5tuple_filter, -ENOTSUP);
2830         return (*dev->dev_ops->get_5tuple_filter)(dev, index, filter,
2831                                                 rx_queue);
2832 }
2833
2834 int
2835 rte_eth_dev_add_flex_filter(uint8_t port_id, uint16_t index,
2836                         struct rte_flex_filter *filter, uint16_t rx_queue)
2837 {
2838         struct rte_eth_dev *dev;
2839
2840         if (port_id >= nb_ports) {
2841                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2842                 return -ENODEV;
2843         }
2844
2845         dev = &rte_eth_devices[port_id];
2846         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->add_flex_filter, -ENOTSUP);
2847         return (*dev->dev_ops->add_flex_filter)(dev, index, filter, rx_queue);
2848 }
2849
2850 int
2851 rte_eth_dev_remove_flex_filter(uint8_t port_id, uint16_t index)
2852 {
2853         struct rte_eth_dev *dev;
2854
2855         if (port_id >= nb_ports) {
2856                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2857                 return -ENODEV;
2858         }
2859
2860         dev = &rte_eth_devices[port_id];
2861         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->remove_flex_filter, -ENOTSUP);
2862         return (*dev->dev_ops->remove_flex_filter)(dev, index);
2863 }
2864
2865 int
2866 rte_eth_dev_get_flex_filter(uint8_t port_id, uint16_t index,
2867                         struct rte_flex_filter *filter, uint16_t *rx_queue)
2868 {
2869         struct rte_eth_dev *dev;
2870
2871         if (filter == NULL || rx_queue == NULL)
2872                 return -EINVAL;
2873
2874         if (port_id >= nb_ports) {
2875                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2876                 return -ENODEV;
2877         }
2878
2879         dev = &rte_eth_devices[port_id];
2880         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_flex_filter, -ENOTSUP);
2881         return (*dev->dev_ops->get_flex_filter)(dev, index, filter,
2882                                                 rx_queue);
2883 }