ethdev: Tx rate limitation for queue and VF
[dpdk.git] / lib / librte_ether / rte_ethdev.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/types.h>
35 #include <sys/queue.h>
36 #include <ctype.h>
37 #include <stdio.h>
38 #include <stdlib.h>
39 #include <string.h>
40 #include <stdarg.h>
41 #include <errno.h>
42 #include <stdint.h>
43 #include <inttypes.h>
44
45 #include <rte_byteorder.h>
46 #include <rte_log.h>
47 #include <rte_debug.h>
48 #include <rte_interrupts.h>
49 #include <rte_pci.h>
50 #include <rte_memory.h>
51 #include <rte_memcpy.h>
52 #include <rte_memzone.h>
53 #include <rte_launch.h>
54 #include <rte_tailq.h>
55 #include <rte_eal.h>
56 #include <rte_per_lcore.h>
57 #include <rte_lcore.h>
58 #include <rte_atomic.h>
59 #include <rte_branch_prediction.h>
60 #include <rte_common.h>
61 #include <rte_ring.h>
62 #include <rte_mempool.h>
63 #include <rte_malloc.h>
64 #include <rte_mbuf.h>
65 #include <rte_errno.h>
66 #include <rte_spinlock.h>
67
68 #include "rte_ether.h"
69 #include "rte_ethdev.h"
70
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
/* Error-level trace with the calling function name prepended; compiled
 * to nothing unless ethdev debugging is enabled. */
#define PMD_DEBUG_TRACE(fmt, args...) do {                        \
                RTE_LOG(ERR, PMD, "%s: " fmt, __func__, ## args); \
        } while (0)
#else
#define PMD_DEBUG_TRACE(fmt, args...)
#endif

/* Macros restricting certain functions to the primary process instance only */
#define PROC_PRIMARY_OR_ERR_RET(retval) do { \
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
                PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
                return (retval); \
        } \
} while(0)
#define PROC_PRIMARY_OR_RET() do { \
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
                PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
                return; \
        } \
} while(0)

/* Macros to check for invalid function pointers in dev_ops structure */
#define FUNC_PTR_OR_ERR_RET(func, retval) do { \
        if ((func) == NULL) { \
                PMD_DEBUG_TRACE("Function not supported\n"); \
                return (retval); \
        } \
} while(0)
#define FUNC_PTR_OR_RET(func) do { \
        if ((func) == NULL) { \
                PMD_DEBUG_TRACE("Function not supported\n"); \
                return; \
        } \
} while(0)
106
/* Name of the shared memzone holding per-port data across processes. */
static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
/* Per-port device structures, indexed by port id. */
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
/* Shared per-port data array, mapped lazily from the memzone above. */
static struct rte_eth_dev_data *rte_eth_dev_data = NULL;
/* Number of ports allocated so far by rte_eth_dev_allocate(). */
static uint8_t nb_ports = 0;

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
114
/**
 * The user application callback description.
 *
 * It contains callback address to be registered by user application,
 * the pointer to the parameters for callback, and the event type.
 */
struct rte_eth_dev_callback {
        TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
        rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
        void *cb_arg;                           /**< Parameter for callback */
        enum rte_eth_event_type event;          /**< Interrupt event type */
        uint32_t active;                        /**< Callback is executing */
};

/* Direction selector used by the queue-statistics mapping helpers. */
enum {
        STAT_QMAP_TX = 0,
        STAT_QMAP_RX
};
133
134 static inline void
135 rte_eth_dev_data_alloc(void)
136 {
137         const unsigned flags = 0;
138         const struct rte_memzone *mz;
139
140         if (rte_eal_process_type() == RTE_PROC_PRIMARY){
141                 mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
142                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
143                                 rte_socket_id(), flags);
144         } else
145                 mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
146         if (mz == NULL)
147                 rte_panic("Cannot allocate memzone for ethernet port data\n");
148
149         rte_eth_dev_data = mz->addr;
150         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
151                 memset(rte_eth_dev_data, 0,
152                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
153 }
154
155 struct rte_eth_dev *
156 rte_eth_dev_allocate(void)
157 {
158         struct rte_eth_dev *eth_dev;
159
160         if (nb_ports == RTE_MAX_ETHPORTS) {
161                 PMD_DEBUG_TRACE("Reached maximum number of ethernet ports\n");
162                 return NULL;
163         }
164
165         if (rte_eth_dev_data == NULL)
166                 rte_eth_dev_data_alloc();
167
168         eth_dev = &rte_eth_devices[nb_ports];
169         eth_dev->data = &rte_eth_dev_data[nb_ports];
170         eth_dev->data->port_id = nb_ports++;
171         return eth_dev;
172 }
173
/*
 * PCI probe callback: allocate an ethdev port for the matched PCI device
 * and run the PMD-specific initialization hook.
 *
 * Returns 0 on success, -ENOMEM if no port slot is available, or the
 * (negative) value returned by the driver's eth_dev_init on failure.
 */
static int
rte_eth_dev_init(struct rte_pci_driver *pci_drv,
                 struct rte_pci_device *pci_dev)
{
        struct eth_driver    *eth_drv;
        struct rte_eth_dev *eth_dev;
        int diag;

        /* pci_drv is the first member of struct eth_driver, so this cast
         * recovers the enclosing ethernet driver. */
        eth_drv = (struct eth_driver *)pci_drv;

        eth_dev = rte_eth_dev_allocate();
        if (eth_dev == NULL)
                return -ENOMEM;

        /* Only the primary process allocates per-port private data; a
         * secondary process reuses it via the shared data memzone. */
        if (rte_eal_process_type() == RTE_PROC_PRIMARY){
                eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
                                  eth_drv->dev_private_size,
                                  CACHE_LINE_SIZE);
                if (eth_dev->data->dev_private == NULL)
                        rte_panic("Cannot allocate memzone for private port data\n");
        }
        eth_dev->pci_dev = pci_dev;
        eth_dev->driver = eth_drv;
        eth_dev->data->rx_mbuf_alloc_failed = 0;

        /* init user callbacks */
        TAILQ_INIT(&(eth_dev->callbacks));

        /*
         * Set the default maximum frame size.
         */
        eth_dev->data->max_frame_size = ETHER_MAX_LEN;

        /* Invoke PMD device initialization function */
        diag = (*eth_drv->eth_dev_init)(eth_drv, eth_dev);
        if (diag == 0)
                return (0);

        /* PMD initialization failed: free the private data and give back
         * the port slot taken by rte_eth_dev_allocate() above. */
        PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%u device_id=0x%x)"
                        " failed\n", pci_drv->name,
                        (unsigned) pci_dev->id.vendor_id,
                        (unsigned) pci_dev->id.device_id);
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                rte_free(eth_dev->data->dev_private);
        nb_ports--;
        return diag;
}
221
222 /**
223  * Register an Ethernet [Poll Mode] driver.
224  *
225  * Function invoked by the initialization function of an Ethernet driver
226  * to simultaneously register itself as a PCI driver and as an Ethernet
227  * Poll Mode Driver.
228  * Invokes the rte_eal_pci_register() function to register the *pci_drv*
229  * structure embedded in the *eth_drv* structure, after having stored the
230  * address of the rte_eth_dev_init() function in the *devinit* field of
231  * the *pci_drv* structure.
232  * During the PCI probing phase, the rte_eth_dev_init() function is
233  * invoked for each PCI [Ethernet device] matching the embedded PCI
234  * identifiers provided by the driver.
235  */
void
rte_eth_driver_register(struct eth_driver *eth_drv)
{
        /* Hook the generic ethdev init routine into the PCI probe path,
         * then register the embedded PCI driver with the EAL. */
        eth_drv->pci_drv.devinit = rte_eth_dev_init;
        rte_eal_pci_register(&eth_drv->pci_drv);
}
242
243 int
244 rte_eth_dev_socket_id(uint8_t port_id)
245 {
246         if (port_id >= nb_ports)
247                 return -1;
248         return rte_eth_devices[port_id].pci_dev->numa_node;
249 }
250
251 uint8_t
252 rte_eth_dev_count(void)
253 {
254         return (nb_ports);
255 }
256
/*
 * (Re)size the per-port array of RX queue pointers to nb_queues entries.
 *
 * First-time configuration zero-allocates the array.  On reconfiguration,
 * queues beyond the new count are first released through the PMD's
 * rx_queue_release hook, the array is then resized with rte_realloc(),
 * and any newly added slots are zeroed.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or -ENOTSUP when
 * the PMD lacks rx_queue_release and one is required.
 */
static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_rx_queues;
        void **rxq;
        unsigned i;

        if (dev->data->rx_queues == NULL) { /* first time configuration */
                dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
                                sizeof(dev->data->rx_queues[0]) * nb_queues,
                                CACHE_LINE_SIZE);
                if (dev->data->rx_queues == NULL) {
                        dev->data->nb_rx_queues = 0;
                        return -(ENOMEM);
                }
        } else { /* re-configure */
                FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                /* release queues being dropped before shrinking the array */
                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);
                rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
                                CACHE_LINE_SIZE);
                if (rxq == NULL)
                        return -(ENOMEM);

                /* zero the slots added when growing the array */
                if (nb_queues > old_nb_queues)
                        memset(rxq + old_nb_queues, 0,
                                sizeof(rxq[0]) * (nb_queues - old_nb_queues));

                dev->data->rx_queues = rxq;

        }
        dev->data->nb_rx_queues = nb_queues;
        return (0);
}
294
295 int
296 rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
297 {
298         struct rte_eth_dev *dev;
299
300         /* This function is only safe when called from the primary process
301          * in a multi-process setup*/
302         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
303
304         if (port_id >= nb_ports) {
305                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
306                 return -EINVAL;
307         }
308
309         dev = &rte_eth_devices[port_id];
310         if (rx_queue_id >= dev->data->nb_rx_queues) {
311                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
312                 return -EINVAL;
313         }
314
315         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
316
317         return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
318
319 }
320
321 int
322 rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
323 {
324         struct rte_eth_dev *dev;
325
326         /* This function is only safe when called from the primary process
327          * in a multi-process setup*/
328         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
329
330         if (port_id >= nb_ports) {
331                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
332                 return -EINVAL;
333         }
334
335         dev = &rte_eth_devices[port_id];
336         if (rx_queue_id >= dev->data->nb_rx_queues) {
337                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
338                 return -EINVAL;
339         }
340
341         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
342
343         return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
344
345 }
346
347 int
348 rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
349 {
350         struct rte_eth_dev *dev;
351
352         /* This function is only safe when called from the primary process
353          * in a multi-process setup*/
354         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
355
356         if (port_id >= nb_ports) {
357                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
358                 return -EINVAL;
359         }
360
361         dev = &rte_eth_devices[port_id];
362         if (tx_queue_id >= dev->data->nb_tx_queues) {
363                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
364                 return -EINVAL;
365         }
366
367         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
368
369         return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
370
371 }
372
373 int
374 rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
375 {
376         struct rte_eth_dev *dev;
377
378         /* This function is only safe when called from the primary process
379          * in a multi-process setup*/
380         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
381
382         if (port_id >= nb_ports) {
383                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
384                 return -EINVAL;
385         }
386
387         dev = &rte_eth_devices[port_id];
388         if (tx_queue_id >= dev->data->nb_tx_queues) {
389                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
390                 return -EINVAL;
391         }
392
393         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
394
395         return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
396
397 }
398
/*
 * (Re)size the per-port array of TX queue pointers to nb_queues entries.
 *
 * Mirrors rte_eth_dev_rx_queue_config(): first-time configuration
 * zero-allocates the array; on reconfiguration, queues beyond the new
 * count are released via the PMD's tx_queue_release hook before the
 * array is resized with rte_realloc(), and new slots are zeroed.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or -ENOTSUP when
 * the PMD lacks tx_queue_release and one is required.
 */
static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_tx_queues;
        void **txq;
        unsigned i;

        if (dev->data->tx_queues == NULL) { /* first time configuration */
                dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
                                sizeof(dev->data->tx_queues[0]) * nb_queues,
                                CACHE_LINE_SIZE);
                if (dev->data->tx_queues == NULL) {
                        dev->data->nb_tx_queues = 0;
                        return -(ENOMEM);
                }
        } else { /* re-configure */
                FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                /* release queues being dropped before shrinking the array */
                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);
                txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
                                CACHE_LINE_SIZE);
                if (txq == NULL)
                        return -(ENOMEM);

                /* zero the slots added when growing the array */
                if (nb_queues > old_nb_queues)
                        memset(txq + old_nb_queues, 0,
                                sizeof(txq[0]) * (nb_queues - old_nb_queues));

                dev->data->tx_queues = txq;

        }
        dev->data->nb_tx_queues = nb_queues;
        return (0);
}
436
/*
 * Validate the requested multi-queue configuration before
 * rte_eth_dev_configure() commits it.
 *
 * When SRIOV is active only VMDq-compatible modes are accepted: the
 * effective rx/tx mq_mode stored in dev->data is forced to *_VMDQ_ONLY
 * and the requested queue counts must fit within the per-pool queue
 * budget.  Without SRIOV, VMDQ+DCB and DCB modes are checked for the
 * mandatory queue counts and pool/traffic-class numbers.
 *
 * Returns 0 when the configuration is acceptable, -EINVAL otherwise.
 */
static int
rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                      const struct rte_eth_conf *dev_conf)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];

        if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
                /* check multi-queue mode */
                if ((dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ||
                    (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) ||
                    (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB_RSS) ||
                    (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB)) {
                        /* SRIOV only works in VMDq enable mode */
                        PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
                                        " SRIOV active, "
                                        "wrong VMDQ mq_mode rx %u tx %u\n",
                                        port_id,
                                        dev_conf->rxmode.mq_mode,
                                        dev_conf->txmode.mq_mode);
                        return (-EINVAL);
                }

                switch (dev_conf->rxmode.mq_mode) {
                case ETH_MQ_RX_VMDQ_RSS:
                case ETH_MQ_RX_VMDQ_DCB:
                case ETH_MQ_RX_VMDQ_DCB_RSS:
                        /* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
                        PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
                                        " SRIOV active, "
                                        "unsupported VMDQ mq_mode rx %u\n",
                                        port_id, dev_conf->rxmode.mq_mode);
                        return (-EINVAL);
                default: /* ETH_MQ_RX_VMDQ_ONLY or ETH_MQ_RX_NONE */
                        /* if no mq mode was configured, use the default scheme */
                        dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
                        if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
                                RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
                        break;
                }

                switch (dev_conf->txmode.mq_mode) {
                case ETH_MQ_TX_VMDQ_DCB:
                        /* DCB VMDQ in SRIOV mode, not implemented yet */
                        PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
                                        " SRIOV active, "
                                        "unsupported VMDQ mq_mode tx %u\n",
                                        port_id, dev_conf->txmode.mq_mode);
                        return (-EINVAL);
                default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
                        /* if no mq mode was configured, use the default scheme */
                        dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
                        if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
                                RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
                        break;
                }

                /* check valid queue number */
                if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
                    (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
                        PMD_DEBUG_TRACE("ethdev port_id=%d SRIOV active, "
                                    "queue number must less equal to %d\n",
                                        port_id, RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
                        return (-EINVAL);
                }
        } else {
                /* For VMDQ+DCB mode check our configuration before we go further */
                if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
                        const struct rte_eth_vmdq_dcb_conf *conf;

                        if (nb_rx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_rx_q "
                                                "!= %d\n",
                                                port_id, ETH_VMDQ_DCB_NUM_QUEUES);
                                return (-EINVAL);
                        }
                        conf = &(dev_conf->rx_adv_conf.vmdq_dcb_conf);
                        if (! (conf->nb_queue_pools == ETH_16_POOLS ||
                               conf->nb_queue_pools == ETH_32_POOLS)) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
                                                "nb_queue_pools must be %d or %d\n",
                                                port_id, ETH_16_POOLS, ETH_32_POOLS);
                                return (-EINVAL);
                        }
                }
                if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
                        const struct rte_eth_vmdq_dcb_tx_conf *conf;

                        if (nb_tx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_tx_q "
                                                "!= %d\n",
                                                port_id, ETH_VMDQ_DCB_NUM_QUEUES);
                                return (-EINVAL);
                        }
                        conf = &(dev_conf->tx_adv_conf.vmdq_dcb_tx_conf);
                        if (! (conf->nb_queue_pools == ETH_16_POOLS ||
                               conf->nb_queue_pools == ETH_32_POOLS)) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
                                                "nb_queue_pools != %d or nb_queue_pools "
                                                "!= %d\n",
                                                port_id, ETH_16_POOLS, ETH_32_POOLS);
                                return (-EINVAL);
                        }
                }

                /* For DCB mode check our configuration before we go further */
                if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
                        const struct rte_eth_dcb_rx_conf *conf;

                        if (nb_rx_q != ETH_DCB_NUM_QUEUES) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_rx_q "
                                                "!= %d\n",
                                                port_id, ETH_DCB_NUM_QUEUES);
                                return (-EINVAL);
                        }
                        conf = &(dev_conf->rx_adv_conf.dcb_rx_conf);
                        if (! (conf->nb_tcs == ETH_4_TCS ||
                               conf->nb_tcs == ETH_8_TCS)) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
                                                "nb_tcs != %d or nb_tcs "
                                                "!= %d\n",
                                                port_id, ETH_4_TCS, ETH_8_TCS);
                                return (-EINVAL);
                        }
                }

                if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
                        const struct rte_eth_dcb_tx_conf *conf;

                        if (nb_tx_q != ETH_DCB_NUM_QUEUES) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_tx_q "
                                                "!= %d\n",
                                                port_id, ETH_DCB_NUM_QUEUES);
                                return (-EINVAL);
                        }
                        conf = &(dev_conf->tx_adv_conf.dcb_tx_conf);
                        if (! (conf->nb_tcs == ETH_4_TCS ||
                               conf->nb_tcs == ETH_8_TCS)) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
                                                "nb_tcs != %d or nb_tcs "
                                                "!= %d\n",
                                                port_id, ETH_4_TCS, ETH_8_TCS);
                                return (-EINVAL);
                        }
                }
        }
        return 0;
}
584
/*
 * Configure an ethernet port: validate queue counts against the device
 * capabilities, copy the configuration, check frame-size limits and
 * multi-queue mode, allocate the RX/TX queue arrays, and finally invoke
 * the PMD's dev_configure hook.  On any failure after queue arrays were
 * allocated, they are torn back down before returning.
 *
 * Returns 0 on success or a negative errno-style value.
 */
int
rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                      const struct rte_eth_conf *dev_conf)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        int diag;

        /* This function is only safe when called from the primary process
         * in a multi-process setup*/
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        if (port_id >= nb_ports || port_id >= RTE_MAX_ETHPORTS) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-EINVAL);
        }
        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

        /* reconfiguration of a running port is not allowed */
        if (dev->data->dev_started) {
                PMD_DEBUG_TRACE(
                    "port %d must be stopped to allow configuration\n", port_id);
                return (-EBUSY);
        }

        /*
         * Check that the numbers of RX and TX queues are not greater
         * than the maximum number of RX and TX queues supported by the
         * configured device.
         */
        (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
        if (nb_rx_q > dev_info.max_rx_queues) {
                PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
                                port_id, nb_rx_q, dev_info.max_rx_queues);
                return (-EINVAL);
        }
        if (nb_rx_q == 0) {
                PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_q == 0\n", port_id);
                return (-EINVAL);
        }

        if (nb_tx_q > dev_info.max_tx_queues) {
                PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
                                port_id, nb_tx_q, dev_info.max_tx_queues);
                return (-EINVAL);
        }
        if (nb_tx_q == 0) {
                PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_q == 0\n", port_id);
                return (-EINVAL);
        }

        /* Copy the dev_conf parameter into the dev structure */
        memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

        /*
         * If jumbo frames are enabled, check that the maximum RX packet
         * length is supported by the configured device.
         */
        if (dev_conf->rxmode.jumbo_frame == 1) {
                if (dev_conf->rxmode.max_rx_pkt_len >
                    dev_info.max_rx_pktlen) {
                        PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
                                " > max valid value %u\n",
                                port_id,
                                (unsigned)dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned)dev_info.max_rx_pktlen);
                        return (-EINVAL);
                }
                else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
                        PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
                                " < min valid value %u\n",
                                port_id,
                                (unsigned)dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned)ETHER_MIN_LEN);
                        return (-EINVAL);
                }
        } else
                /* Use default value */
                dev->data->dev_conf.rxmode.max_rx_pkt_len = ETHER_MAX_LEN;

        /* multiple queue mode checking */
        diag = rte_eth_dev_check_mq_mode(port_id, nb_rx_q, nb_tx_q, dev_conf);
        if (diag != 0) {
                PMD_DEBUG_TRACE("port%d rte_eth_dev_check_mq_mode = %d\n",
                                port_id, diag);
                return diag;
        }

        /*
         * Setup new number of RX/TX queues and reconfigure device.
         */
        diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
        if (diag != 0) {
                PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
                                port_id, diag);
                return diag;
        }

        diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
        if (diag != 0) {
                PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
                                port_id, diag);
                /* roll back the RX queue array allocated above */
                rte_eth_dev_rx_queue_config(dev, 0);
                return diag;
        }

        diag = (*dev->dev_ops->dev_configure)(dev);
        if (diag != 0) {
                PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
                                port_id, diag);
                /* roll back both queue arrays allocated above */
                rte_eth_dev_rx_queue_config(dev, 0);
                rte_eth_dev_tx_queue_config(dev, 0);
                return diag;
        }

        return 0;
}
704
/*
 * Replay the software-cached port configuration (MAC addresses,
 * promiscuous and allmulticast state) into the hardware.  Called after
 * a device start so the hardware matches what the application set up
 * beforehand.
 */
static void
rte_eth_dev_config_restore(uint8_t port_id)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        struct ether_addr addr;
        uint16_t i;
        uint32_t pool = 0;

        dev = &rte_eth_devices[port_id];

        rte_eth_dev_info_get(port_id, &dev_info);

        /* with SRIOV, addresses go into the default VMDq pool */
        if (RTE_ETH_DEV_SRIOV(dev).active)
                pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx;

        /* replay MAC address configuration */
        for (i = 0; i < dev_info.max_mac_addrs; i++) {
                addr = dev->data->mac_addrs[i];

                /* skip zero address */
                if (is_zero_ether_addr(&addr))
                        continue;

                /* add address to the hardware */
                if  (*dev->dev_ops->mac_addr_add)
                        (*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool);
                else {
                        PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
                                        port_id);
                        /* exit the loop but not return an error */
                        break;
                }
        }

        /* replay promiscuous configuration */
        if (rte_eth_promiscuous_get(port_id) == 1)
                rte_eth_promiscuous_enable(port_id);
        else if (rte_eth_promiscuous_get(port_id) == 0)
                rte_eth_promiscuous_disable(port_id);

        /* replay allmulticast configuration */
        if (rte_eth_allmulticast_get(port_id) == 1)
                rte_eth_allmulticast_enable(port_id);
        else if (rte_eth_allmulticast_get(port_id) == 0)
                rte_eth_allmulticast_disable(port_id);
}
752
753 int
754 rte_eth_dev_start(uint8_t port_id)
755 {
756         struct rte_eth_dev *dev;
757         int diag;
758
759         /* This function is only safe when called from the primary process
760          * in a multi-process setup*/
761         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
762
763         if (port_id >= nb_ports) {
764                 PMD_DEBUG_TRACE("Invalid port_id=%" PRIu8 "\n", port_id);
765                 return (-EINVAL);
766         }
767         dev = &rte_eth_devices[port_id];
768
769         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
770
771         if (dev->data->dev_started != 0) {
772                 PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
773                         " already started\n",
774                         port_id);
775                 return (0);
776         }
777
778         diag = (*dev->dev_ops->dev_start)(dev);
779         if (diag == 0)
780                 dev->data->dev_started = 1;
781         else
782                 return diag;
783
784         rte_eth_dev_config_restore(port_id);
785
786         return 0;
787 }
788
789 void
790 rte_eth_dev_stop(uint8_t port_id)
791 {
792         struct rte_eth_dev *dev;
793
794         /* This function is only safe when called from the primary process
795          * in a multi-process setup*/
796         PROC_PRIMARY_OR_RET();
797
798         if (port_id >= nb_ports) {
799                 PMD_DEBUG_TRACE("Invalid port_id=%" PRIu8 "\n", port_id);
800                 return;
801         }
802         dev = &rte_eth_devices[port_id];
803
804         FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
805
806         if (dev->data->dev_started == 0) {
807                 PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
808                         " already stopped\n",
809                         port_id);
810                 return;
811         }
812
813         dev->data->dev_started = 0;
814         (*dev->dev_ops->dev_stop)(dev);
815 }
816
817 int
818 rte_eth_dev_set_link_up(uint8_t port_id)
819 {
820         struct rte_eth_dev *dev;
821
822         /* This function is only safe when called from the primary process
823          * in a multi-process setup*/
824         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
825
826         if (port_id >= nb_ports) {
827                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
828                 return -EINVAL;
829         }
830         dev = &rte_eth_devices[port_id];
831
832         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
833         return (*dev->dev_ops->dev_set_link_up)(dev);
834 }
835
836 int
837 rte_eth_dev_set_link_down(uint8_t port_id)
838 {
839         struct rte_eth_dev *dev;
840
841         /* This function is only safe when called from the primary process
842          * in a multi-process setup*/
843         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
844
845         if (port_id >= nb_ports) {
846                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
847                 return -EINVAL;
848         }
849         dev = &rte_eth_devices[port_id];
850
851         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
852         return (*dev->dev_ops->dev_set_link_down)(dev);
853 }
854
855 void
856 rte_eth_dev_close(uint8_t port_id)
857 {
858         struct rte_eth_dev *dev;
859
860         /* This function is only safe when called from the primary process
861          * in a multi-process setup*/
862         PROC_PRIMARY_OR_RET();
863
864         if (port_id >= nb_ports) {
865                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
866                 return;
867         }
868
869         dev = &rte_eth_devices[port_id];
870
871         FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
872         dev->data->dev_started = 0;
873         (*dev->dev_ops->dev_close)(dev);
874 }
875
876 int
877 rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
878                        uint16_t nb_rx_desc, unsigned int socket_id,
879                        const struct rte_eth_rxconf *rx_conf,
880                        struct rte_mempool *mp)
881 {
882         struct rte_eth_dev *dev;
883         struct rte_pktmbuf_pool_private *mbp_priv;
884         struct rte_eth_dev_info dev_info;
885
886         /* This function is only safe when called from the primary process
887          * in a multi-process setup*/
888         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
889
890         if (port_id >= nb_ports) {
891                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
892                 return (-EINVAL);
893         }
894         dev = &rte_eth_devices[port_id];
895         if (rx_queue_id >= dev->data->nb_rx_queues) {
896                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
897                 return (-EINVAL);
898         }
899
900         if (dev->data->dev_started) {
901                 PMD_DEBUG_TRACE(
902                     "port %d must be stopped to allow configuration\n", port_id);
903                 return -EBUSY;
904         }
905
906         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
907         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
908
909         /*
910          * Check the size of the mbuf data buffer.
911          * This value must be provided in the private data of the memory pool.
912          * First check that the memory pool has a valid private data.
913          */
914         (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
915         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
916                 PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
917                                 mp->name, (int) mp->private_data_size,
918                                 (int) sizeof(struct rte_pktmbuf_pool_private));
919                 return (-ENOSPC);
920         }
921         mbp_priv = rte_mempool_get_priv(mp);
922         if ((uint32_t) (mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM) <
923             dev_info.min_rx_bufsize) {
924                 PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
925                                 "(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
926                                 "=%d)\n",
927                                 mp->name,
928                                 (int)mbp_priv->mbuf_data_room_size,
929                                 (int)(RTE_PKTMBUF_HEADROOM +
930                                       dev_info.min_rx_bufsize),
931                                 (int)RTE_PKTMBUF_HEADROOM,
932                                 (int)dev_info.min_rx_bufsize);
933                 return (-EINVAL);
934         }
935
936         return (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
937                                                socket_id, rx_conf, mp);
938 }
939
940 int
941 rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
942                        uint16_t nb_tx_desc, unsigned int socket_id,
943                        const struct rte_eth_txconf *tx_conf)
944 {
945         struct rte_eth_dev *dev;
946
947         /* This function is only safe when called from the primary process
948          * in a multi-process setup*/
949         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
950
951         if (port_id >= RTE_MAX_ETHPORTS || port_id >= nb_ports) {
952                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
953                 return (-EINVAL);
954         }
955         dev = &rte_eth_devices[port_id];
956         if (tx_queue_id >= dev->data->nb_tx_queues) {
957                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
958                 return (-EINVAL);
959         }
960
961         if (dev->data->dev_started) {
962                 PMD_DEBUG_TRACE(
963                     "port %d must be stopped to allow configuration\n", port_id);
964                 return -EBUSY;
965         }
966
967         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
968         return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
969                                                socket_id, tx_conf);
970 }
971
972 void
973 rte_eth_promiscuous_enable(uint8_t port_id)
974 {
975         struct rte_eth_dev *dev;
976
977         if (port_id >= nb_ports) {
978                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
979                 return;
980         }
981         dev = &rte_eth_devices[port_id];
982
983         FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
984         (*dev->dev_ops->promiscuous_enable)(dev);
985         dev->data->promiscuous = 1;
986 }
987
988 void
989 rte_eth_promiscuous_disable(uint8_t port_id)
990 {
991         struct rte_eth_dev *dev;
992
993         if (port_id >= nb_ports) {
994                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
995                 return;
996         }
997         dev = &rte_eth_devices[port_id];
998
999         FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
1000         dev->data->promiscuous = 0;
1001         (*dev->dev_ops->promiscuous_disable)(dev);
1002 }
1003
1004 int
1005 rte_eth_promiscuous_get(uint8_t port_id)
1006 {
1007         struct rte_eth_dev *dev;
1008
1009         if (port_id >= nb_ports) {
1010                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1011                 return -1;
1012         }
1013
1014         dev = &rte_eth_devices[port_id];
1015         return dev->data->promiscuous;
1016 }
1017
1018 void
1019 rte_eth_allmulticast_enable(uint8_t port_id)
1020 {
1021         struct rte_eth_dev *dev;
1022
1023         if (port_id >= nb_ports) {
1024                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1025                 return;
1026         }
1027         dev = &rte_eth_devices[port_id];
1028
1029         FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
1030         (*dev->dev_ops->allmulticast_enable)(dev);
1031         dev->data->all_multicast = 1;
1032 }
1033
1034 void
1035 rte_eth_allmulticast_disable(uint8_t port_id)
1036 {
1037         struct rte_eth_dev *dev;
1038
1039         if (port_id >= nb_ports) {
1040                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1041                 return;
1042         }
1043         dev = &rte_eth_devices[port_id];
1044
1045         FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
1046         dev->data->all_multicast = 0;
1047         (*dev->dev_ops->allmulticast_disable)(dev);
1048 }
1049
1050 int
1051 rte_eth_allmulticast_get(uint8_t port_id)
1052 {
1053         struct rte_eth_dev *dev;
1054
1055         if (port_id >= nb_ports) {
1056                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1057                 return -1;
1058         }
1059
1060         dev = &rte_eth_devices[port_id];
1061         return dev->data->all_multicast;
1062 }
1063
/*
 * Atomically snapshot dev->data->dev_link into *link.
 * Assumes struct rte_eth_link packs into 64 bits (TODO confirm against
 * the header), so one 64-bit compare-and-set yields a torn-free copy.
 * Returns 0 on success, -1 if the cmpset did not take effect.
 */
static inline int
rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	/* cmpset(dst, expected=*dst, new=*src): writes the current link
	 * status into the caller's buffer in a single atomic operation. */
	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
1077
1078 void
1079 rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
1080 {
1081         struct rte_eth_dev *dev;
1082
1083         if (port_id >= nb_ports) {
1084                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1085                 return;
1086         }
1087         dev = &rte_eth_devices[port_id];
1088         FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1089
1090         if (dev->data->dev_conf.intr_conf.lsc != 0)
1091                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1092         else {
1093                 (*dev->dev_ops->link_update)(dev, 1);
1094                 *eth_link = dev->data->dev_link;
1095         }
1096 }
1097
1098 void
1099 rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
1100 {
1101         struct rte_eth_dev *dev;
1102
1103         if (port_id >= nb_ports) {
1104                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1105                 return;
1106         }
1107         dev = &rte_eth_devices[port_id];
1108         FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1109
1110         if (dev->data->dev_conf.intr_conf.lsc != 0)
1111                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1112         else {
1113                 (*dev->dev_ops->link_update)(dev, 0);
1114                 *eth_link = dev->data->dev_link;
1115         }
1116 }
1117
1118 void
1119 rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
1120 {
1121         struct rte_eth_dev *dev;
1122
1123         if (port_id >= nb_ports) {
1124                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1125                 return;
1126         }
1127         dev = &rte_eth_devices[port_id];
1128         memset(stats, 0, sizeof(*stats));
1129
1130         FUNC_PTR_OR_RET(*dev->dev_ops->stats_get);
1131         (*dev->dev_ops->stats_get)(dev, stats);
1132         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1133 }
1134
1135 void
1136 rte_eth_stats_reset(uint8_t port_id)
1137 {
1138         struct rte_eth_dev *dev;
1139
1140         if (port_id >= nb_ports) {
1141                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1142                 return;
1143         }
1144         dev = &rte_eth_devices[port_id];
1145
1146         FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
1147         (*dev->dev_ops->stats_reset)(dev);
1148 }
1149
1150
1151 static int
1152 set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
1153                 uint8_t is_rx)
1154 {
1155         struct rte_eth_dev *dev;
1156
1157         if (port_id >= nb_ports) {
1158                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1159                 return -ENODEV;
1160         }
1161         dev = &rte_eth_devices[port_id];
1162
1163         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
1164         return (*dev->dev_ops->queue_stats_mapping_set)
1165                         (dev, queue_id, stat_idx, is_rx);
1166 }
1167
1168
1169 int
1170 rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
1171                 uint8_t stat_idx)
1172 {
1173         return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
1174                         STAT_QMAP_TX);
1175 }
1176
1177
1178 int
1179 rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
1180                 uint8_t stat_idx)
1181 {
1182         return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
1183                         STAT_QMAP_RX);
1184 }
1185
1186
1187 void
1188 rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
1189 {
1190         struct rte_eth_dev *dev;
1191
1192         if (port_id >= nb_ports) {
1193                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1194                 return;
1195         }
1196         dev = &rte_eth_devices[port_id];
1197
1198         /* Default device offload capabilities to zero */
1199         dev_info->rx_offload_capa = 0;
1200         dev_info->tx_offload_capa = 0;
1201         dev_info->if_index = 0;
1202         FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
1203         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
1204         dev_info->pci_dev = dev->pci_dev;
1205         if (dev->driver)
1206                 dev_info->driver_name = dev->driver->pci_drv.name;
1207 }
1208
1209 void
1210 rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
1211 {
1212         struct rte_eth_dev *dev;
1213
1214         if (port_id >= nb_ports) {
1215                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1216                 return;
1217         }
1218         dev = &rte_eth_devices[port_id];
1219         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
1220 }
1221
1222 int
1223 rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
1224 {
1225         struct rte_eth_dev *dev;
1226
1227         if (port_id >= nb_ports) {
1228                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1229                 return (-ENODEV);
1230         }
1231         dev = &rte_eth_devices[port_id];
1232         if (! (dev->data->dev_conf.rxmode.hw_vlan_filter)) {
1233                 PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
1234                 return (-ENOSYS);
1235         }
1236
1237         if (vlan_id > 4095) {
1238                 PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
1239                                 port_id, (unsigned) vlan_id);
1240                 return (-EINVAL);
1241         }
1242         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
1243         (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
1244         return (0);
1245 }
1246
1247 int
1248 rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on)
1249 {
1250         struct rte_eth_dev *dev;
1251
1252         if (port_id >= nb_ports) {
1253                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1254                 return (-ENODEV);
1255         }
1256
1257         dev = &rte_eth_devices[port_id];
1258         if (rx_queue_id >= dev->data->nb_rx_queues) {
1259                 PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", port_id);
1260                 return (-EINVAL);
1261         }
1262
1263         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
1264         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
1265
1266         return (0);
1267 }
1268
1269 int
1270 rte_eth_dev_set_vlan_ether_type(uint8_t port_id, uint16_t tpid)
1271 {
1272         struct rte_eth_dev *dev;
1273
1274         if (port_id >= nb_ports) {
1275                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1276                 return (-ENODEV);
1277         }
1278
1279         dev = &rte_eth_devices[port_id];
1280         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
1281         (*dev->dev_ops->vlan_tpid_set)(dev, tpid);
1282
1283         return (0);
1284 }
1285
/*
 * Apply the VLAN offload settings requested in offload_mask.
 *
 * For each of the strip/filter/extend offloads, compare the requested
 * state against the cached rxmode configuration; update the cache and
 * accumulate the corresponding *_MASK bit only for settings that changed.
 * The PMD is invoked once, with the combined change mask, and only if at
 * least one setting actually changed.
 * Returns 0 on success, -ENODEV for a bad port, -ENOTSUP if a change is
 * needed but the PMD lacks vlan_offload_set.
 */
int
rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
{
	struct rte_eth_dev *dev;
	int ret = 0;
	int mask = 0;   /* accumulated ETH_VLAN_*_MASK bits for the PMD */
	int cur, org = 0;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-ENODEV);
	}

	dev = &rte_eth_devices[port_id];

	/*check which option changed by application*/
	cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
	org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
	if (cur != org){
		dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
		mask |= ETH_VLAN_STRIP_MASK;
	}

	cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
	org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
	if (cur != org){
		dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
		mask |= ETH_VLAN_FILTER_MASK;
	}

	cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
	org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
	if (cur != org){
		dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
		mask |= ETH_VLAN_EXTEND_MASK;
	}

	/*no change*/
	if(mask == 0)
		return ret;

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
	(*dev->dev_ops->vlan_offload_set)(dev, mask);

	return ret;
}
1332
1333 int
1334 rte_eth_dev_get_vlan_offload(uint8_t port_id)
1335 {
1336         struct rte_eth_dev *dev;
1337         int ret = 0;
1338
1339         if (port_id >= nb_ports) {
1340                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1341                 return (-ENODEV);
1342         }
1343
1344         dev = &rte_eth_devices[port_id];
1345
1346         if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1347                 ret |= ETH_VLAN_STRIP_OFFLOAD ;
1348
1349         if (dev->data->dev_conf.rxmode.hw_vlan_filter)
1350                 ret |= ETH_VLAN_FILTER_OFFLOAD ;
1351
1352         if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1353                 ret |= ETH_VLAN_EXTEND_OFFLOAD ;
1354
1355         return ret;
1356 }
1357
1358
1359 int
1360 rte_eth_dev_fdir_add_signature_filter(uint8_t port_id,
1361                                       struct rte_fdir_filter *fdir_filter,
1362                                       uint8_t queue)
1363 {
1364         struct rte_eth_dev *dev;
1365
1366         if (port_id >= nb_ports) {
1367                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1368                 return (-ENODEV);
1369         }
1370
1371         dev = &rte_eth_devices[port_id];
1372
1373         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
1374                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1375                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1376                 return (-ENOSYS);
1377         }
1378
1379         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1380              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1381             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1382                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1383                                 "None l4type, source & destinations ports " \
1384                                 "should be null!\n");
1385                 return (-EINVAL);
1386         }
1387
1388         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_signature_filter, -ENOTSUP);
1389         return (*dev->dev_ops->fdir_add_signature_filter)(dev, fdir_filter,
1390                                                                 queue);
1391 }
1392
1393 int
1394 rte_eth_dev_fdir_update_signature_filter(uint8_t port_id,
1395                                          struct rte_fdir_filter *fdir_filter,
1396                                          uint8_t queue)
1397 {
1398         struct rte_eth_dev *dev;
1399
1400         if (port_id >= nb_ports) {
1401                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1402                 return (-ENODEV);
1403         }
1404
1405         dev = &rte_eth_devices[port_id];
1406
1407         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
1408                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1409                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1410                 return (-ENOSYS);
1411         }
1412
1413         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1414              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1415             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1416                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1417                                 "None l4type, source & destinations ports " \
1418                                 "should be null!\n");
1419                 return (-EINVAL);
1420         }
1421
1422         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_signature_filter, -ENOTSUP);
1423         return (*dev->dev_ops->fdir_update_signature_filter)(dev, fdir_filter,
1424                                                                 queue);
1425
1426 }
1427
1428 int
1429 rte_eth_dev_fdir_remove_signature_filter(uint8_t port_id,
1430                                          struct rte_fdir_filter *fdir_filter)
1431 {
1432         struct rte_eth_dev *dev;
1433
1434         if (port_id >= nb_ports) {
1435                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1436                 return (-ENODEV);
1437         }
1438
1439         dev = &rte_eth_devices[port_id];
1440
1441         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
1442                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1443                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1444                 return (-ENOSYS);
1445         }
1446
1447         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1448              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1449             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1450                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1451                                 "None l4type source & destinations ports " \
1452                                 "should be null!\n");
1453                 return (-EINVAL);
1454         }
1455
1456         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_signature_filter, -ENOTSUP);
1457         return (*dev->dev_ops->fdir_remove_signature_filter)(dev, fdir_filter);
1458 }
1459
1460 int
1461 rte_eth_dev_fdir_get_infos(uint8_t port_id, struct rte_eth_fdir *fdir)
1462 {
1463         struct rte_eth_dev *dev;
1464
1465         if (port_id >= nb_ports) {
1466                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1467                 return (-ENODEV);
1468         }
1469
1470         dev = &rte_eth_devices[port_id];
1471         if (! (dev->data->dev_conf.fdir_conf.mode)) {
1472                 PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
1473                 return (-ENOSYS);
1474         }
1475
1476         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_infos_get, -ENOTSUP);
1477
1478         (*dev->dev_ops->fdir_infos_get)(dev, fdir);
1479         return (0);
1480 }
1481
1482 int
1483 rte_eth_dev_fdir_add_perfect_filter(uint8_t port_id,
1484                                     struct rte_fdir_filter *fdir_filter,
1485                                     uint16_t soft_id, uint8_t queue,
1486                                     uint8_t drop)
1487 {
1488         struct rte_eth_dev *dev;
1489
1490         if (port_id >= nb_ports) {
1491                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1492                 return (-ENODEV);
1493         }
1494
1495         dev = &rte_eth_devices[port_id];
1496
1497         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
1498                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1499                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1500                 return (-ENOSYS);
1501         }
1502
1503         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1504              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1505             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1506                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1507                                 "None l4type, source & destinations ports " \
1508                                 "should be null!\n");
1509                 return (-EINVAL);
1510         }
1511
1512         /* For now IPv6 is not supported with perfect filter */
1513         if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
1514                 return (-ENOTSUP);
1515
1516         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_perfect_filter, -ENOTSUP);
1517         return (*dev->dev_ops->fdir_add_perfect_filter)(dev, fdir_filter,
1518                                                                 soft_id, queue,
1519                                                                 drop);
1520 }
1521
1522 int
1523 rte_eth_dev_fdir_update_perfect_filter(uint8_t port_id,
1524                                        struct rte_fdir_filter *fdir_filter,
1525                                        uint16_t soft_id, uint8_t queue,
1526                                        uint8_t drop)
1527 {
1528         struct rte_eth_dev *dev;
1529
1530         if (port_id >= nb_ports) {
1531                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1532                 return (-ENODEV);
1533         }
1534
1535         dev = &rte_eth_devices[port_id];
1536
1537         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
1538                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1539                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1540                 return (-ENOSYS);
1541         }
1542
1543         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1544              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1545             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1546                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1547                                 "None l4type, source & destinations ports " \
1548                                 "should be null!\n");
1549                 return (-EINVAL);
1550         }
1551
1552         /* For now IPv6 is not supported with perfect filter */
1553         if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
1554                 return (-ENOTSUP);
1555
1556         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_perfect_filter, -ENOTSUP);
1557         return (*dev->dev_ops->fdir_update_perfect_filter)(dev, fdir_filter,
1558                                                         soft_id, queue, drop);
1559 }
1560
1561 int
1562 rte_eth_dev_fdir_remove_perfect_filter(uint8_t port_id,
1563                                        struct rte_fdir_filter *fdir_filter,
1564                                        uint16_t soft_id)
1565 {
1566         struct rte_eth_dev *dev;
1567
1568         if (port_id >= nb_ports) {
1569                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1570                 return (-ENODEV);
1571         }
1572
1573         dev = &rte_eth_devices[port_id];
1574
1575         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
1576                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1577                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1578                 return (-ENOSYS);
1579         }
1580
1581         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1582              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1583             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1584                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1585                                 "None l4type, source & destinations ports " \
1586                                 "should be null!\n");
1587                 return (-EINVAL);
1588         }
1589
1590         /* For now IPv6 is not supported with perfect filter */
1591         if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
1592                 return (-ENOTSUP);
1593
1594         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_perfect_filter, -ENOTSUP);
1595         return (*dev->dev_ops->fdir_remove_perfect_filter)(dev, fdir_filter,
1596                                                                 soft_id);
1597 }
1598
1599 int
1600 rte_eth_dev_fdir_set_masks(uint8_t port_id, struct rte_fdir_masks *fdir_mask)
1601 {
1602         struct rte_eth_dev *dev;
1603
1604         if (port_id >= nb_ports) {
1605                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1606                 return (-ENODEV);
1607         }
1608
1609         dev = &rte_eth_devices[port_id];
1610         if (! (dev->data->dev_conf.fdir_conf.mode)) {
1611                 PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
1612                 return (-ENOSYS);
1613         }
1614
1615         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_set_masks, -ENOTSUP);
1616         return (*dev->dev_ops->fdir_set_masks)(dev, fdir_mask);
1617 }
1618
1619 int
1620 rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
1621 {
1622         struct rte_eth_dev *dev;
1623
1624         if (port_id >= nb_ports) {
1625                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1626                 return (-ENODEV);
1627         }
1628
1629         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
1630                 PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
1631                 return (-EINVAL);
1632         }
1633
1634         dev = &rte_eth_devices[port_id];
1635         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
1636         return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
1637 }
1638
1639 int
1640 rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc_conf)
1641 {
1642         struct rte_eth_dev *dev;
1643
1644         if (port_id >= nb_ports) {
1645                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1646                 return (-ENODEV);
1647         }
1648
1649         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
1650                 PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
1651                 return (-EINVAL);
1652         }
1653
1654         dev = &rte_eth_devices[port_id];
1655         /* High water, low water validation are device specific */
1656         if  (*dev->dev_ops->priority_flow_ctrl_set)
1657                 return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
1658         return (-ENOTSUP);
1659 }
1660
1661 int
1662 rte_eth_dev_rss_reta_update(uint8_t port_id, struct rte_eth_rss_reta *reta_conf)
1663 {
1664         struct rte_eth_dev *dev;
1665         uint16_t max_rxq;
1666         uint8_t i,j;
1667
1668         if (port_id >= nb_ports) {
1669                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1670                 return (-ENODEV);
1671         }
1672
1673         /* Invalid mask bit(s) setting */
1674         if ((reta_conf->mask_lo == 0) && (reta_conf->mask_hi == 0)) {
1675                 PMD_DEBUG_TRACE("Invalid update mask bits for port=%d\n",port_id);
1676                 return (-EINVAL);
1677         }
1678
1679         dev = &rte_eth_devices[port_id];
1680         max_rxq = (dev->data->nb_rx_queues <= ETH_RSS_RETA_MAX_QUEUE) ?
1681                 dev->data->nb_rx_queues : ETH_RSS_RETA_MAX_QUEUE;
1682         if (reta_conf->mask_lo != 0) {
1683                 for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES/2; i++) {
1684                         if ((reta_conf->mask_lo & (1ULL << i)) &&
1685                                 (reta_conf->reta[i] >= max_rxq)) {
1686                                 PMD_DEBUG_TRACE("RETA hash index output"
1687                                         "configration for port=%d,invalid"
1688                                         "queue=%d\n",port_id,reta_conf->reta[i]);
1689
1690                                 return (-EINVAL);
1691                         }
1692                 }
1693         }
1694
1695         if (reta_conf->mask_hi != 0) {
1696                 for (i = 0; i< ETH_RSS_RETA_NUM_ENTRIES/2; i++) {
1697                         j = (uint8_t)(i + ETH_RSS_RETA_NUM_ENTRIES/2);
1698
1699                         /* Check if the max entry >= 128 */
1700                         if ((reta_conf->mask_hi & (1ULL << i)) &&
1701                                 (reta_conf->reta[j] >= max_rxq)) {
1702                                 PMD_DEBUG_TRACE("RETA hash index output"
1703                                         "configration for port=%d,invalid"
1704                                         "queue=%d\n",port_id,reta_conf->reta[j]);
1705
1706                                 return (-EINVAL);
1707                         }
1708                 }
1709         }
1710
1711         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
1712         return (*dev->dev_ops->reta_update)(dev, reta_conf);
1713 }
1714
1715 int
1716 rte_eth_dev_rss_reta_query(uint8_t port_id, struct rte_eth_rss_reta *reta_conf)
1717 {
1718         struct rte_eth_dev *dev;
1719
1720         if (port_id >= nb_ports) {
1721                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1722                 return (-ENODEV);
1723         }
1724
1725         if((reta_conf->mask_lo == 0) && (reta_conf->mask_hi == 0)) {
1726                 PMD_DEBUG_TRACE("Invalid update mask bits for the port=%d\n",port_id);
1727                 return (-EINVAL);
1728         }
1729
1730         dev = &rte_eth_devices[port_id];
1731         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
1732         return (*dev->dev_ops->reta_query)(dev, reta_conf);
1733 }
1734
1735 int
1736 rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf)
1737 {
1738         struct rte_eth_dev *dev;
1739         uint16_t rss_hash_protos;
1740
1741         if (port_id >= nb_ports) {
1742                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1743                 return (-ENODEV);
1744         }
1745         rss_hash_protos = rss_conf->rss_hf;
1746         if ((rss_hash_protos != 0) &&
1747             ((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) {
1748                 PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%x\n",
1749                                 rss_hash_protos);
1750                 return (-EINVAL);
1751         }
1752         dev = &rte_eth_devices[port_id];
1753         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
1754         return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
1755 }
1756
1757 int
1758 rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
1759                               struct rte_eth_rss_conf *rss_conf)
1760 {
1761         struct rte_eth_dev *dev;
1762
1763         if (port_id >= nb_ports) {
1764                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1765                 return (-ENODEV);
1766         }
1767         dev = &rte_eth_devices[port_id];
1768         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
1769         return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
1770 }
1771
1772 int
1773 rte_eth_led_on(uint8_t port_id)
1774 {
1775         struct rte_eth_dev *dev;
1776
1777         if (port_id >= nb_ports) {
1778                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1779                 return (-ENODEV);
1780         }
1781
1782         dev = &rte_eth_devices[port_id];
1783         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
1784         return ((*dev->dev_ops->dev_led_on)(dev));
1785 }
1786
1787 int
1788 rte_eth_led_off(uint8_t port_id)
1789 {
1790         struct rte_eth_dev *dev;
1791
1792         if (port_id >= nb_ports) {
1793                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1794                 return (-ENODEV);
1795         }
1796
1797         dev = &rte_eth_devices[port_id];
1798         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
1799         return ((*dev->dev_ops->dev_led_off)(dev));
1800 }
1801
1802 /*
1803  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
1804  * an empty spot.
1805  */
1806 static inline int
1807 get_mac_addr_index(uint8_t port_id, struct ether_addr *addr)
1808 {
1809         struct rte_eth_dev_info dev_info;
1810         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1811         unsigned i;
1812
1813         rte_eth_dev_info_get(port_id, &dev_info);
1814
1815         for (i = 0; i < dev_info.max_mac_addrs; i++)
1816                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
1817                         return i;
1818
1819         return -1;
1820 }
1821
/* All-zero Ethernet address, used to mark free slots in the MAC tables. */
static struct ether_addr null_mac_addr = {{0, 0, 0, 0, 0, 0}};
1823
1824 int
1825 rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
1826                         uint32_t pool)
1827 {
1828         struct rte_eth_dev *dev;
1829         int index;
1830         uint64_t pool_mask;
1831
1832         if (port_id >= nb_ports) {
1833                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1834                 return (-ENODEV);
1835         }
1836         dev = &rte_eth_devices[port_id];
1837         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
1838
1839         if (is_zero_ether_addr(addr)) {
1840                 PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
1841                         port_id);
1842                 return (-EINVAL);
1843         }
1844         if (pool >= ETH_64_POOLS) {
1845                 PMD_DEBUG_TRACE("pool id must be 0-%d\n",ETH_64_POOLS - 1);
1846                 return (-EINVAL);
1847         }
1848
1849         index = get_mac_addr_index(port_id, addr);
1850         if (index < 0) {
1851                 index = get_mac_addr_index(port_id, &null_mac_addr);
1852                 if (index < 0) {
1853                         PMD_DEBUG_TRACE("port %d: MAC address array full\n",
1854                                 port_id);
1855                         return (-ENOSPC);
1856                 }
1857         } else {
1858                 pool_mask = dev->data->mac_pool_sel[index];
1859
1860                 /* Check if both MAC address and pool is alread there, and do nothing */
1861                 if (pool_mask & (1ULL << pool))
1862                         return 0;
1863         }
1864
1865         /* Update NIC */
1866         (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
1867
1868         /* Update address in NIC data structure */
1869         ether_addr_copy(addr, &dev->data->mac_addrs[index]);
1870
1871         /* Update pool bitmap in NIC data structure */
1872         dev->data->mac_pool_sel[index] |= (1ULL << pool);
1873
1874         return 0;
1875 }
1876
1877 int
1878 rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
1879 {
1880         struct rte_eth_dev *dev;
1881         int index;
1882
1883         if (port_id >= nb_ports) {
1884                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1885                 return (-ENODEV);
1886         }
1887         dev = &rte_eth_devices[port_id];
1888         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
1889
1890         index = get_mac_addr_index(port_id, addr);
1891         if (index == 0) {
1892                 PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
1893                 return (-EADDRINUSE);
1894         } else if (index < 0)
1895                 return 0;  /* Do nothing if address wasn't found */
1896
1897         /* Update NIC */
1898         (*dev->dev_ops->mac_addr_remove)(dev, index);
1899
1900         /* Update address in NIC data structure */
1901         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
1902
1903         return 0;
1904 }
1905
1906 int
1907 rte_eth_dev_set_vf_rxmode(uint8_t port_id,  uint16_t vf,
1908                                 uint16_t rx_mode, uint8_t on)
1909 {
1910         uint16_t num_vfs;
1911         struct rte_eth_dev *dev;
1912         struct rte_eth_dev_info dev_info;
1913
1914         if (port_id >= nb_ports) {
1915                 PMD_DEBUG_TRACE("set VF RX mode:Invalid port_id=%d\n",
1916                                 port_id);
1917                 return (-ENODEV);
1918         }
1919
1920         dev = &rte_eth_devices[port_id];
1921         rte_eth_dev_info_get(port_id, &dev_info);
1922
1923         num_vfs = dev_info.max_vfs;
1924         if (vf > num_vfs)
1925         {
1926                 PMD_DEBUG_TRACE("set VF RX mode:invalid VF id %d\n", vf);
1927                 return (-EINVAL);
1928         }
1929         if (rx_mode == 0)
1930         {
1931                 PMD_DEBUG_TRACE("set VF RX mode:mode mask ca not be zero\n");
1932                 return (-EINVAL);
1933         }
1934         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP);
1935         return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on);
1936 }
1937
1938 /*
1939  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
1940  * an empty spot.
1941  */
1942 static inline int
1943 get_hash_mac_addr_index(uint8_t port_id, struct ether_addr *addr)
1944 {
1945         struct rte_eth_dev_info dev_info;
1946         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1947         unsigned i;
1948
1949         rte_eth_dev_info_get(port_id, &dev_info);
1950         if (!dev->data->hash_mac_addrs)
1951                 return -1;
1952
1953         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
1954                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
1955                         ETHER_ADDR_LEN) == 0)
1956                         return i;
1957
1958         return -1;
1959 }
1960
1961 int
1962 rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
1963                                 uint8_t on)
1964 {
1965         int index;
1966         int ret;
1967         struct rte_eth_dev *dev;
1968
1969         if (port_id >= nb_ports) {
1970                 PMD_DEBUG_TRACE("unicast hash setting:Invalid port_id=%d\n",
1971                         port_id);
1972                 return (-ENODEV);
1973         }
1974
1975         dev = &rte_eth_devices[port_id];
1976         if (is_zero_ether_addr(addr)) {
1977                 PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
1978                         port_id);
1979                 return (-EINVAL);
1980         }
1981
1982         index = get_hash_mac_addr_index(port_id, addr);
1983         /* Check if it's already there, and do nothing */
1984         if ((index >= 0) && (on))
1985                 return 0;
1986
1987         if (index < 0) {
1988                 if (!on) {
1989                         PMD_DEBUG_TRACE("port %d: the MAC address was not"
1990                                 "set in UTA\n", port_id);
1991                         return (-EINVAL);
1992                 }
1993
1994                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
1995                 if (index < 0) {
1996                         PMD_DEBUG_TRACE("port %d: MAC address array full\n",
1997                                         port_id);
1998                         return (-ENOSPC);
1999                 }
2000         }
2001
2002         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
2003         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
2004         if (ret == 0) {
2005                 /* Update address in NIC data structure */
2006                 if (on)
2007                         ether_addr_copy(addr,
2008                                         &dev->data->hash_mac_addrs[index]);
2009                 else
2010                         ether_addr_copy(&null_mac_addr,
2011                                         &dev->data->hash_mac_addrs[index]);
2012         }
2013
2014         return ret;
2015 }
2016
2017 int
2018 rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
2019 {
2020         struct rte_eth_dev *dev;
2021
2022         if (port_id >= nb_ports) {
2023                 PMD_DEBUG_TRACE("unicast hash setting:Invalid port_id=%d\n",
2024                         port_id);
2025                 return (-ENODEV);
2026         }
2027
2028         dev = &rte_eth_devices[port_id];
2029
2030         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
2031         return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
2032 }
2033
2034 int
2035 rte_eth_dev_set_vf_rx(uint8_t port_id,uint16_t vf, uint8_t on)
2036 {
2037         uint16_t num_vfs;
2038         struct rte_eth_dev *dev;
2039         struct rte_eth_dev_info dev_info;
2040
2041         if (port_id >= nb_ports) {
2042                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2043                 return (-ENODEV);
2044         }
2045
2046         dev = &rte_eth_devices[port_id];
2047         rte_eth_dev_info_get(port_id, &dev_info);
2048
2049         num_vfs = dev_info.max_vfs;
2050         if (vf > num_vfs)
2051         {
2052                 PMD_DEBUG_TRACE("port %d: invalid vf id\n", port_id);
2053                 return (-EINVAL);
2054         }
2055
2056         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP);
2057         return (*dev->dev_ops->set_vf_rx)(dev, vf,on);
2058 }
2059
2060 int
2061 rte_eth_dev_set_vf_tx(uint8_t port_id,uint16_t vf, uint8_t on)
2062 {
2063         uint16_t num_vfs;
2064         struct rte_eth_dev *dev;
2065         struct rte_eth_dev_info dev_info;
2066
2067         if (port_id >= nb_ports) {
2068                 PMD_DEBUG_TRACE("set pool tx:Invalid port_id=%d\n", port_id);
2069                 return (-ENODEV);
2070         }
2071
2072         dev = &rte_eth_devices[port_id];
2073         rte_eth_dev_info_get(port_id, &dev_info);
2074
2075         num_vfs = dev_info.max_vfs;
2076         if (vf > num_vfs)
2077         {
2078                 PMD_DEBUG_TRACE("set pool tx:invalid pool id=%d\n", vf);
2079                 return (-EINVAL);
2080         }
2081
2082         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP);
2083         return (*dev->dev_ops->set_vf_tx)(dev, vf,on);
2084 }
2085
2086 int
2087 rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id,
2088                                  uint64_t vf_mask,uint8_t vlan_on)
2089 {
2090         struct rte_eth_dev *dev;
2091
2092         if (port_id >= nb_ports) {
2093                 PMD_DEBUG_TRACE("VF VLAN filter:invalid port id=%d\n",
2094                                 port_id);
2095                 return (-ENODEV);
2096         }
2097         dev = &rte_eth_devices[port_id];
2098
2099         if(vlan_id > ETHER_MAX_VLAN_ID)
2100         {
2101                 PMD_DEBUG_TRACE("VF VLAN filter:invalid VLAN id=%d\n",
2102                         vlan_id);
2103                 return (-EINVAL);
2104         }
2105         if (vf_mask == 0)
2106         {
2107                 PMD_DEBUG_TRACE("VF VLAN filter:pool_mask can not be 0\n");
2108                 return (-EINVAL);
2109         }
2110
2111         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP);
2112         return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id,
2113                                                 vf_mask,vlan_on);
2114 }
2115
2116 int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
2117                                         uint16_t tx_rate)
2118 {
2119         struct rte_eth_dev *dev;
2120         struct rte_eth_dev_info dev_info;
2121         struct rte_eth_link link;
2122
2123         if (port_id >= nb_ports) {
2124                 PMD_DEBUG_TRACE("set queue rate limit:invalid port id=%d\n",
2125                                 port_id);
2126                 return -ENODEV;
2127         }
2128
2129         dev = &rte_eth_devices[port_id];
2130         rte_eth_dev_info_get(port_id, &dev_info);
2131         link = dev->data->dev_link;
2132
2133         if (queue_idx > dev_info.max_tx_queues) {
2134                 PMD_DEBUG_TRACE("set queue rate limit:port %d: "
2135                                 "invalid queue id=%d\n", port_id, queue_idx);
2136                 return -EINVAL;
2137         }
2138
2139         if (tx_rate > link.link_speed) {
2140                 PMD_DEBUG_TRACE("set queue rate limit:invalid tx_rate=%d, "
2141                                 "bigger than link speed= %d\n",
2142                         tx_rate, link_speed);
2143                 return -EINVAL;
2144         }
2145
2146         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
2147         return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate);
2148 }
2149
2150 int rte_eth_set_vf_rate_limit(uint8_t port_id, uint16_t vf, uint16_t tx_rate,
2151                                 uint64_t q_msk)
2152 {
2153         struct rte_eth_dev *dev;
2154         struct rte_eth_dev_info dev_info;
2155         struct rte_eth_link link;
2156
2157         if (q_msk == 0)
2158                 return 0;
2159
2160         if (port_id >= nb_ports) {
2161                 PMD_DEBUG_TRACE("set VF rate limit:invalid port id=%d\n",
2162                                 port_id);
2163                 return -ENODEV;
2164         }
2165
2166         dev = &rte_eth_devices[port_id];
2167         rte_eth_dev_info_get(port_id, &dev_info);
2168         link = dev->data->dev_link;
2169
2170         if (vf > dev_info.max_vfs) {
2171                 PMD_DEBUG_TRACE("set VF rate limit:port %d: "
2172                                 "invalid vf id=%d\n", port_id, vf);
2173                 return -EINVAL;
2174         }
2175
2176         if (tx_rate > link.link_speed) {
2177                 PMD_DEBUG_TRACE("set VF rate limit:invalid tx_rate=%d, "
2178                                 "bigger than link speed= %d\n",
2179                                 tx_rate, link_speed);
2180                 return -EINVAL;
2181         }
2182
2183         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rate_limit, -ENOTSUP);
2184         return (*dev->dev_ops->set_vf_rate_limit)(dev, vf, tx_rate, q_msk);
2185 }
2186
2187 int
2188 rte_eth_mirror_rule_set(uint8_t port_id,
2189                         struct rte_eth_vmdq_mirror_conf *mirror_conf,
2190                         uint8_t rule_id, uint8_t on)
2191 {
2192         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2193
2194         if (port_id >= nb_ports) {
2195                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2196                 return (-ENODEV);
2197         }
2198
2199         if (mirror_conf->rule_type_mask == 0) {
2200                 PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
2201                 return (-EINVAL);
2202         }
2203
2204         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
2205                 PMD_DEBUG_TRACE("Invalid dst pool, pool id must"
2206                         "be 0-%d\n",ETH_64_POOLS - 1);
2207                 return (-EINVAL);
2208         }
2209
2210         if ((mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) &&
2211                 (mirror_conf->pool_mask == 0)) {
2212                 PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not"
2213                                 "be 0.\n");
2214                 return (-EINVAL);
2215         }
2216
2217         if(rule_id >= ETH_VMDQ_NUM_MIRROR_RULE)
2218         {
2219                 PMD_DEBUG_TRACE("Invalid rule_id, rule_id must be 0-%d\n",
2220                         ETH_VMDQ_NUM_MIRROR_RULE - 1);
2221                 return (-EINVAL);
2222         }
2223
2224         dev = &rte_eth_devices[port_id];
2225         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
2226
2227         return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
2228 }
2229
2230 int
2231 rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
2232 {
2233         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2234
2235         if (port_id >= nb_ports) {
2236                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2237                 return (-ENODEV);
2238         }
2239
2240         if(rule_id >= ETH_VMDQ_NUM_MIRROR_RULE)
2241         {
2242                 PMD_DEBUG_TRACE("Invalid rule_id, rule_id must be 0-%d\n",
2243                         ETH_VMDQ_NUM_MIRROR_RULE-1);
2244                 return (-EINVAL);
2245         }
2246
2247         dev = &rte_eth_devices[port_id];
2248         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
2249
2250         return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
2251 }
2252
2253 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2254 uint16_t
2255 rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
2256                  struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
2257 {
2258         struct rte_eth_dev *dev;
2259
2260         if (port_id >= nb_ports) {
2261                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2262                 return 0;
2263         }
2264         dev = &rte_eth_devices[port_id];
2265         FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, -ENOTSUP);
2266         if (queue_id >= dev->data->nb_rx_queues) {
2267                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
2268                 return 0;
2269         }
2270         return (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
2271                                                 rx_pkts, nb_pkts);
2272 }
2273
2274 uint16_t
2275 rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
2276                  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2277 {
2278         struct rte_eth_dev *dev;
2279
2280         if (port_id >= nb_ports) {
2281                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2282                 return 0;
2283         }
2284         dev = &rte_eth_devices[port_id];
2285
2286         FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, -ENOTSUP);
2287         if (queue_id >= dev->data->nb_tx_queues) {
2288                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
2289                 return 0;
2290         }
2291         return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id],
2292                                                 tx_pkts, nb_pkts);
2293 }
2294
2295 uint32_t
2296 rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
2297 {
2298         struct rte_eth_dev *dev;
2299
2300         if (port_id >= nb_ports) {
2301                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2302                 return 0;
2303         }
2304         dev = &rte_eth_devices[port_id];
2305         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, -ENOTSUP);
2306         return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
2307 }
2308
2309 int
2310 rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
2311 {
2312         struct rte_eth_dev *dev;
2313
2314         if (port_id >= nb_ports) {
2315                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2316                 return (-ENODEV);
2317         }
2318         dev = &rte_eth_devices[port_id];
2319         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
2320         return (*dev->dev_ops->rx_descriptor_done)( \
2321                 dev->data->rx_queues[queue_id], offset);
2322 }
2323 #endif
2324
2325 int
2326 rte_eth_dev_callback_register(uint8_t port_id,
2327                         enum rte_eth_event_type event,
2328                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2329 {
2330         struct rte_eth_dev *dev;
2331         struct rte_eth_dev_callback *user_cb;
2332
2333         if (!cb_fn)
2334                 return (-EINVAL);
2335         if (port_id >= nb_ports) {
2336                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2337                 return (-EINVAL);
2338         }
2339
2340         dev = &rte_eth_devices[port_id];
2341         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2342
2343         TAILQ_FOREACH(user_cb, &(dev->callbacks), next) {
2344                 if (user_cb->cb_fn == cb_fn &&
2345                         user_cb->cb_arg == cb_arg &&
2346                         user_cb->event == event) {
2347                         break;
2348                 }
2349         }
2350
2351         /* create a new callback. */
2352         if (user_cb == NULL && (user_cb = rte_zmalloc("INTR_USER_CALLBACK",
2353                         sizeof(struct rte_eth_dev_callback), 0)) != NULL) {
2354                 user_cb->cb_fn = cb_fn;
2355                 user_cb->cb_arg = cb_arg;
2356                 user_cb->event = event;
2357                 TAILQ_INSERT_TAIL(&(dev->callbacks), user_cb, next);
2358         }
2359
2360         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2361         return ((user_cb == NULL) ? -ENOMEM : 0);
2362 }
2363
/*
 * Unregister callbacks matching (cb_fn, event, cb_arg) on a port.
 * Passing cb_arg == (void *)-1 acts as a wildcard: every registration of
 * cb_fn for the event is removed regardless of its argument.
 * Returns 0 on success, -EINVAL for bad arguments, or -EAGAIN when at
 * least one matching callback is currently executing and could not be
 * removed (the caller may retry later).
 */
int
rte_eth_dev_callback_unregister(uint8_t port_id,
                        enum rte_eth_event_type event,
                        rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
        int ret;
        struct rte_eth_dev *dev;
        struct rte_eth_dev_callback *cb, *next;

        if (!cb_fn)
                return (-EINVAL);
        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-EINVAL);
        }

        dev = &rte_eth_devices[port_id];
        rte_spinlock_lock(&rte_eth_dev_cb_lock);

        ret = 0;
        /* 'next' is captured before any removal so the iteration stays
         * valid while entries are unlinked from the list. */
        for (cb = TAILQ_FIRST(&dev->callbacks); cb != NULL; cb = next) {

                next = TAILQ_NEXT(cb, next);

                if (cb->cb_fn != cb_fn || cb->event != event ||
                                (cb->cb_arg != (void *)-1 &&
                                cb->cb_arg != cb_arg))
                        continue;

                /*
                 * if this callback is not executing right now,
                 * then remove it.
                 */
                if (cb->active == 0) {
                        TAILQ_REMOVE(&(dev->callbacks), cb, next);
                        rte_free(cb);
                } else {
                        ret = -EAGAIN;
                }
        }

        rte_spinlock_unlock(&rte_eth_dev_cb_lock);
        return (ret);
}
2408
/*
 * Invoke every callback registered on 'dev' for 'event'.
 * Internal helper called by drivers when an event (e.g. link state
 * change) occurs.
 *
 * The callback entry is copied and the global callback lock is dropped
 * while the user function runs, so callbacks may themselves call the
 * ethdev API. The 'active' flag is what callback_unregister checks to
 * avoid freeing an entry that is mid-execution.
 */
void
_rte_eth_dev_callback_process(struct rte_eth_dev *dev,
        enum rte_eth_event_type event)
{
        struct rte_eth_dev_callback *cb_lst;
        struct rte_eth_dev_callback dev_cb;

        rte_spinlock_lock(&rte_eth_dev_cb_lock);
        TAILQ_FOREACH(cb_lst, &(dev->callbacks), next) {
                if (cb_lst->cb_fn == NULL || cb_lst->event != event)
                        continue;
                /* Work on a copy so the list entry can change while the
                 * lock is released below. */
                dev_cb = *cb_lst;
                cb_lst->active = 1;
                rte_spinlock_unlock(&rte_eth_dev_cb_lock);
                dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
                                                dev_cb.cb_arg);
                rte_spinlock_lock(&rte_eth_dev_cb_lock);
                cb_lst->active = 0;
        }
        rte_spinlock_unlock(&rte_eth_dev_cb_lock);
}
2430 #ifdef RTE_NIC_BYPASS
2431 int rte_eth_dev_bypass_init(uint8_t port_id)
2432 {
2433         struct rte_eth_dev *dev;
2434
2435         if (port_id >= nb_ports) {
2436                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2437                 return (-ENODEV);
2438         }
2439
2440         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2441                 PMD_DEBUG_TRACE("Invalid port device\n");
2442                 return (-ENODEV);
2443         }
2444
2445         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP);
2446         (*dev->dev_ops->bypass_init)(dev);
2447         return 0;
2448 }
2449
2450 int
2451 rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state)
2452 {
2453         struct rte_eth_dev *dev;
2454
2455         if (port_id >= nb_ports) {
2456                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2457                 return (-ENODEV);
2458         }
2459
2460         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2461                 PMD_DEBUG_TRACE("Invalid port device\n");
2462                 return (-ENODEV);
2463         }
2464         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
2465         (*dev->dev_ops->bypass_state_show)(dev, state);
2466         return 0;
2467 }
2468
2469 int
2470 rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state)
2471 {
2472         struct rte_eth_dev *dev;
2473
2474         if (port_id >= nb_ports) {
2475                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2476                 return (-ENODEV);
2477         }
2478
2479         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2480                 PMD_DEBUG_TRACE("Invalid port device\n");
2481                 return (-ENODEV);
2482         }
2483
2484         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP);
2485         (*dev->dev_ops->bypass_state_set)(dev, new_state);
2486         return 0;
2487 }
2488
2489 int
2490 rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state)
2491 {
2492         struct rte_eth_dev *dev;
2493
2494         if (port_id >= nb_ports) {
2495                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2496                 return (-ENODEV);
2497         }
2498
2499         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2500                 PMD_DEBUG_TRACE("Invalid port device\n");
2501                 return (-ENODEV);
2502         }
2503
2504         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
2505         (*dev->dev_ops->bypass_event_show)(dev, event, state);
2506         return 0;
2507 }
2508
2509 int
2510 rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state)
2511 {
2512         struct rte_eth_dev *dev;
2513
2514         if (port_id >= nb_ports) {
2515                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2516                 return (-ENODEV);
2517         }
2518
2519         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2520                 PMD_DEBUG_TRACE("Invalid port device\n");
2521                 return (-ENODEV);
2522         }
2523
2524         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP);
2525         (*dev->dev_ops->bypass_event_set)(dev, event, state);
2526         return 0;
2527 }
2528
2529 int
2530 rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout)
2531 {
2532         struct rte_eth_dev *dev;
2533
2534         if (port_id >= nb_ports) {
2535                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2536                 return (-ENODEV);
2537         }
2538
2539         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2540                 PMD_DEBUG_TRACE("Invalid port device\n");
2541                 return (-ENODEV);
2542         }
2543
2544         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP);
2545         (*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout);
2546         return 0;
2547 }
2548
2549 int
2550 rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver)
2551 {
2552         struct rte_eth_dev *dev;
2553
2554         if (port_id >= nb_ports) {
2555                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2556                 return (-ENODEV);
2557         }
2558
2559         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2560                 PMD_DEBUG_TRACE("Invalid port device\n");
2561                 return (-ENODEV);
2562         }
2563
2564         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP);
2565         (*dev->dev_ops->bypass_ver_show)(dev, ver);
2566         return 0;
2567 }
2568
2569 int
2570 rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
2571 {
2572         struct rte_eth_dev *dev;
2573
2574         if (port_id >= nb_ports) {
2575                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2576                 return (-ENODEV);
2577         }
2578
2579         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2580                 PMD_DEBUG_TRACE("Invalid port device\n");
2581                 return (-ENODEV);
2582         }
2583
2584         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP);
2585         (*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout);
2586         return 0;
2587 }
2588
2589 int
2590 rte_eth_dev_bypass_wd_reset(uint8_t port_id)
2591 {
2592         struct rte_eth_dev *dev;
2593
2594         if (port_id >= nb_ports) {
2595                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2596                 return (-ENODEV);
2597         }
2598
2599         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2600                 PMD_DEBUG_TRACE("Invalid port device\n");
2601                 return (-ENODEV);
2602         }
2603
2604         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP);
2605         (*dev->dev_ops->bypass_wd_reset)(dev);
2606         return 0;
2607 }
2608 #endif