ethdev: fix compiler warning on PMD_DEBUG_TRACE formats
[dpdk.git] / lib / librte_ether / rte_ethdev.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/types.h>
35 #include <sys/queue.h>
36 #include <ctype.h>
37 #include <stdio.h>
38 #include <stdlib.h>
39 #include <string.h>
40 #include <stdarg.h>
41 #include <errno.h>
42 #include <stdint.h>
43 #include <inttypes.h>
44
45 #include <rte_byteorder.h>
46 #include <rte_log.h>
47 #include <rte_debug.h>
48 #include <rte_interrupts.h>
49 #include <rte_pci.h>
50 #include <rte_memory.h>
51 #include <rte_memcpy.h>
52 #include <rte_memzone.h>
53 #include <rte_launch.h>
54 #include <rte_tailq.h>
55 #include <rte_eal.h>
56 #include <rte_per_lcore.h>
57 #include <rte_lcore.h>
58 #include <rte_atomic.h>
59 #include <rte_branch_prediction.h>
60 #include <rte_common.h>
61 #include <rte_ring.h>
62 #include <rte_mempool.h>
63 #include <rte_malloc.h>
64 #include <rte_mbuf.h>
65 #include <rte_errno.h>
66 #include <rte_spinlock.h>
67
68 #include "rte_ether.h"
69 #include "rte_ethdev.h"
70
71 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
/* Log an error prefixed with the calling function's name.  Expands to
 * nothing unless RTE_LIBRTE_ETHDEV_DEBUG is enabled at build time. */
#define PMD_DEBUG_TRACE(fmt, args...) do {                        \
                RTE_LOG(ERR, PMD, "%s: " fmt, __func__, ## args); \
        } while (0)
#else
#define PMD_DEBUG_TRACE(fmt, args...)
#endif

/* Macros restricting functions to the primary process only */
#define PROC_PRIMARY_OR_ERR_RET(retval) do { \
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
                PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
                return (retval); \
        } \
} while(0)
/* Same as above, for functions returning void */
#define PROC_PRIMARY_OR_RET() do { \
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
                PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
                return; \
        } \
} while(0)

/* Macros to check for invalid function pointers in dev_ops structure */
#define FUNC_PTR_OR_ERR_RET(func, retval) do { \
        if ((func) == NULL) { \
                PMD_DEBUG_TRACE("Function not supported\n"); \
                return (retval); \
        } \
} while(0)
/* Same as above, for functions returning void */
#define FUNC_PTR_OR_RET(func) do { \
        if ((func) == NULL) { \
                PMD_DEBUG_TRACE("Function not supported\n"); \
                return; \
        } \
} while(0)
106
/* Name of the memzone holding the per-port data shared across processes */
static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
/* Per-port device structures, indexed by port id */
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
/* Shared (memzone-backed) data array, lazily mapped by rte_eth_dev_data_alloc() */
static struct rte_eth_dev_data *rte_eth_dev_data = NULL;
/* Number of ports allocated so far in this process */
static uint8_t nb_ports = 0;

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
114
/**
 * The user application callback description.
 *
 * It contains callback address to be registered by user application,
 * the pointer to the parameters for callback, and the event type.
 */
struct rte_eth_dev_callback {
        TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
        rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
        void *cb_arg;                           /**< Parameter for callback */
        enum rte_eth_event_type event;          /**< Interrupt event type */
        uint32_t active;                        /**< Callback is executing */
};

/* Direction tag for queue <-> statistics-counter mapping requests */
enum {
        STAT_QMAP_TX = 0,
        STAT_QMAP_RX
};
133
134 static inline void
135 rte_eth_dev_data_alloc(void)
136 {
137         const unsigned flags = 0;
138         const struct rte_memzone *mz;
139
140         if (rte_eal_process_type() == RTE_PROC_PRIMARY){
141                 mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
142                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
143                                 rte_socket_id(), flags);
144         } else
145                 mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
146         if (mz == NULL)
147                 rte_panic("Cannot allocate memzone for ethernet port data\n");
148
149         rte_eth_dev_data = mz->addr;
150         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
151                 memset(rte_eth_dev_data, 0,
152                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
153 }
154
155 struct rte_eth_dev *
156 rte_eth_dev_allocate(void)
157 {
158         struct rte_eth_dev *eth_dev;
159
160         if (nb_ports == RTE_MAX_ETHPORTS) {
161                 PMD_DEBUG_TRACE("Reached maximum number of ethernet ports\n");
162                 return NULL;
163         }
164
165         if (rte_eth_dev_data == NULL)
166                 rte_eth_dev_data_alloc();
167
168         eth_dev = &rte_eth_devices[nb_ports];
169         eth_dev->data = &rte_eth_dev_data[nb_ports];
170         eth_dev->data->port_id = nb_ports++;
171         return eth_dev;
172 }
173
174 static int
175 rte_eth_dev_init(struct rte_pci_driver *pci_drv,
176                  struct rte_pci_device *pci_dev)
177 {
178         struct eth_driver    *eth_drv;
179         struct rte_eth_dev *eth_dev;
180         int diag;
181
182         eth_drv = (struct eth_driver *)pci_drv;
183
184         eth_dev = rte_eth_dev_allocate();
185         if (eth_dev == NULL)
186                 return -ENOMEM;
187
188         if (rte_eal_process_type() == RTE_PROC_PRIMARY){
189                 eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
190                                   eth_drv->dev_private_size,
191                                   CACHE_LINE_SIZE);
192                 if (eth_dev->data->dev_private == NULL)
193                         rte_panic("Cannot allocate memzone for private port data\n");
194         }
195         eth_dev->pci_dev = pci_dev;
196         eth_dev->driver = eth_drv;
197         eth_dev->data->rx_mbuf_alloc_failed = 0;
198
199         /* init user callbacks */
200         TAILQ_INIT(&(eth_dev->callbacks));
201
202         /*
203          * Set the default maximum frame size.
204          */
205         eth_dev->data->max_frame_size = ETHER_MAX_LEN;
206
207         /* Invoke PMD device initialization function */
208         diag = (*eth_drv->eth_dev_init)(eth_drv, eth_dev);
209         if (diag == 0)
210                 return (0);
211
212         PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%u device_id=0x%x)"
213                         " failed\n", pci_drv->name,
214                         (unsigned) pci_dev->id.vendor_id,
215                         (unsigned) pci_dev->id.device_id);
216         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
217                 rte_free(eth_dev->data->dev_private);
218         nb_ports--;
219         return diag;
220 }
221
222 /**
223  * Register an Ethernet [Poll Mode] driver.
224  *
225  * Function invoked by the initialization function of an Ethernet driver
226  * to simultaneously register itself as a PCI driver and as an Ethernet
227  * Poll Mode Driver.
228  * Invokes the rte_eal_pci_register() function to register the *pci_drv*
229  * structure embedded in the *eth_drv* structure, after having stored the
230  * address of the rte_eth_dev_init() function in the *devinit* field of
231  * the *pci_drv* structure.
232  * During the PCI probing phase, the rte_eth_dev_init() function is
233  * invoked for each PCI [Ethernet device] matching the embedded PCI
234  * identifiers provided by the driver.
235  */
236 void
237 rte_eth_driver_register(struct eth_driver *eth_drv)
238 {
239         eth_drv->pci_drv.devinit = rte_eth_dev_init;
240         rte_eal_pci_register(&eth_drv->pci_drv);
241 }
242
243 int
244 rte_eth_dev_socket_id(uint8_t port_id)
245 {
246         if (port_id >= nb_ports)
247                 return -1;
248         return rte_eth_devices[port_id].pci_dev->numa_node;
249 }
250
251 uint8_t
252 rte_eth_dev_count(void)
253 {
254         return (nb_ports);
255 }
256
/*
 * Size the dev->data->rx_queues pointer array to hold nb_queues entries.
 * First configuration allocates a zeroed array; reconfiguration releases
 * the queues being dropped (via the PMD's rx_queue_release op), resizes
 * the array and zeroes any newly added slots.
 * Returns 0 on success, -ENOMEM on allocation failure, -ENOTSUP when the
 * PMD lacks rx_queue_release during a reconfiguration.
 */
static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_rx_queues;
        void **rxq;
        unsigned i;

        if (dev->data->rx_queues == NULL) { /* first time configuration */
                dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
                                sizeof(dev->data->rx_queues[0]) * nb_queues,
                                CACHE_LINE_SIZE);
                if (dev->data->rx_queues == NULL) {
                        dev->data->nb_rx_queues = 0;
                        return -(ENOMEM);
                }
        } else { /* re-configure */
                FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                /* release the queues that fall beyond the new count */
                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);
                rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
                                CACHE_LINE_SIZE);
                /* NOTE(review): if rte_realloc() fails, the queues above were
                 * already released but nb_rx_queues is left at the old count —
                 * presumably callers treat -ENOMEM as fatal; confirm. */
                if (rxq == NULL)
                        return -(ENOMEM);

                /* zero the pointer slots added by an enlargement */
                if (nb_queues > old_nb_queues)
                        memset(rxq + old_nb_queues, 0,
                                sizeof(rxq[0]) * (nb_queues - old_nb_queues));

                dev->data->rx_queues = rxq;

        }
        dev->data->nb_rx_queues = nb_queues;
        return (0);
}
294
295 int
296 rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
297 {
298         struct rte_eth_dev *dev;
299
300         /* This function is only safe when called from the primary process
301          * in a multi-process setup*/
302         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
303
304         if (port_id >= nb_ports) {
305                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
306                 return -EINVAL;
307         }
308
309         dev = &rte_eth_devices[port_id];
310         if (rx_queue_id >= dev->data->nb_rx_queues) {
311                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
312                 return -EINVAL;
313         }
314
315         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
316
317         return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
318
319 }
320
321 int
322 rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
323 {
324         struct rte_eth_dev *dev;
325
326         /* This function is only safe when called from the primary process
327          * in a multi-process setup*/
328         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
329
330         if (port_id >= nb_ports) {
331                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
332                 return -EINVAL;
333         }
334
335         dev = &rte_eth_devices[port_id];
336         if (rx_queue_id >= dev->data->nb_rx_queues) {
337                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
338                 return -EINVAL;
339         }
340
341         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
342
343         return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
344
345 }
346
347 int
348 rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
349 {
350         struct rte_eth_dev *dev;
351
352         /* This function is only safe when called from the primary process
353          * in a multi-process setup*/
354         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
355
356         if (port_id >= nb_ports) {
357                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
358                 return -EINVAL;
359         }
360
361         dev = &rte_eth_devices[port_id];
362         if (tx_queue_id >= dev->data->nb_tx_queues) {
363                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
364                 return -EINVAL;
365         }
366
367         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
368
369         return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
370
371 }
372
373 int
374 rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
375 {
376         struct rte_eth_dev *dev;
377
378         /* This function is only safe when called from the primary process
379          * in a multi-process setup*/
380         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
381
382         if (port_id >= nb_ports) {
383                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
384                 return -EINVAL;
385         }
386
387         dev = &rte_eth_devices[port_id];
388         if (tx_queue_id >= dev->data->nb_tx_queues) {
389                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
390                 return -EINVAL;
391         }
392
393         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
394
395         return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
396
397 }
398
/*
 * Size the dev->data->tx_queues pointer array to hold nb_queues entries.
 * First configuration allocates a zeroed array; reconfiguration releases
 * the queues being dropped (via the PMD's tx_queue_release op), resizes
 * the array and zeroes any newly added slots.
 * Returns 0 on success, -ENOMEM on allocation failure, -ENOTSUP when the
 * PMD lacks tx_queue_release during a reconfiguration.
 */
static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_tx_queues;
        void **txq;
        unsigned i;

        if (dev->data->tx_queues == NULL) { /* first time configuration */
                dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
                                sizeof(dev->data->tx_queues[0]) * nb_queues,
                                CACHE_LINE_SIZE);
                if (dev->data->tx_queues == NULL) {
                        dev->data->nb_tx_queues = 0;
                        return -(ENOMEM);
                }
        } else { /* re-configure */
                FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                /* release the queues that fall beyond the new count */
                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);
                txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
                                CACHE_LINE_SIZE);
                /* NOTE(review): if rte_realloc() fails, the queues above were
                 * already released but nb_tx_queues is left at the old count —
                 * presumably callers treat -ENOMEM as fatal; confirm. */
                if (txq == NULL)
                        return -(ENOMEM);

                /* zero the pointer slots added by an enlargement */
                if (nb_queues > old_nb_queues)
                        memset(txq + old_nb_queues, 0,
                                sizeof(txq[0]) * (nb_queues - old_nb_queues));

                dev->data->tx_queues = txq;

        }
        dev->data->nb_tx_queues = nb_queues;
        return (0);
}
436
/*
 * Validate the requested multi-queue (RSS/DCB/VMDQ) configuration against
 * the device's SRIOV state and the fixed queue-count requirements of the
 * VMDQ+DCB and DCB modes.  May rewrite dev->data->dev_conf mq_mode fields
 * (and clamp nb_q_per_pool) to defaults when SRIOV is active.
 * Returns 0 when the configuration is acceptable, -EINVAL otherwise.
 */
static int
rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                      const struct rte_eth_conf *dev_conf)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];

        if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
                /* check multi-queue mode */
                if ((dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ||
                    (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) ||
                    (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB_RSS) ||
                    (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB)) {
                        /* SRIOV only works in VMDq enable mode */
                        PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
                                        " SRIOV active, "
                                        "wrong VMDQ mq_mode rx %u tx %u\n",
                                        port_id,
                                        dev_conf->rxmode.mq_mode,
                                        dev_conf->txmode.mq_mode);
                        return (-EINVAL);
                }

                switch (dev_conf->rxmode.mq_mode) {
                case ETH_MQ_RX_VMDQ_RSS:
                case ETH_MQ_RX_VMDQ_DCB:
                case ETH_MQ_RX_VMDQ_DCB_RSS:
                        /* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
                        PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
                                        " SRIOV active, "
                                        "unsupported VMDQ mq_mode rx %u\n",
                                        port_id, dev_conf->rxmode.mq_mode);
                        return (-EINVAL);
                default: /* ETH_MQ_RX_VMDQ_ONLY or ETH_MQ_RX_NONE */
                        /* no RX mq mode requested: fall back to VMDQ-only */
                        dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
                        if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
                                RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
                        break;
                }

                switch (dev_conf->txmode.mq_mode) {
                case ETH_MQ_TX_VMDQ_DCB:
                        /* DCB VMDQ in SRIOV mode, not implemented yet */
                        PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
                                        " SRIOV active, "
                                        "unsupported VMDQ mq_mode tx %u\n",
                                        port_id, dev_conf->txmode.mq_mode);
                        return (-EINVAL);
                default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
                        /* no TX mq mode requested: fall back to VMDQ-only */
                        dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
                        if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
                                RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
                        break;
                }

                /* check valid queue number */
                if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
                    (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
                        PMD_DEBUG_TRACE("ethdev port_id=%d SRIOV active, "
                                    "queue number must less equal to %d\n",
                                        port_id, RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
                        return (-EINVAL);
                }
        } else {
                /* For VMDQ+DCB mode check our configuration before we go further */
                if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
                        const struct rte_eth_vmdq_dcb_conf *conf;

                        /* this mode requires a fixed number of RX queues */
                        if (nb_rx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_rx_q "
                                                "!= %d\n",
                                                port_id, ETH_VMDQ_DCB_NUM_QUEUES);
                                return (-EINVAL);
                        }
                        conf = &(dev_conf->rx_adv_conf.vmdq_dcb_conf);
                        if (! (conf->nb_queue_pools == ETH_16_POOLS ||
                               conf->nb_queue_pools == ETH_32_POOLS)) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
                                                "nb_queue_pools must be %d or %d\n",
                                                port_id, ETH_16_POOLS, ETH_32_POOLS);
                                return (-EINVAL);
                        }
                }
                if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
                        const struct rte_eth_vmdq_dcb_tx_conf *conf;

                        /* this mode requires a fixed number of TX queues */
                        if (nb_tx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_tx_q "
                                                "!= %d\n",
                                                port_id, ETH_VMDQ_DCB_NUM_QUEUES);
                                return (-EINVAL);
                        }
                        conf = &(dev_conf->tx_adv_conf.vmdq_dcb_tx_conf);
                        if (! (conf->nb_queue_pools == ETH_16_POOLS ||
                               conf->nb_queue_pools == ETH_32_POOLS)) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
                                                "nb_queue_pools != %d or nb_queue_pools "
                                                "!= %d\n",
                                                port_id, ETH_16_POOLS, ETH_32_POOLS);
                                return (-EINVAL);
                        }
                }

                /* For DCB mode check our configuration before we go further */
                if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
                        const struct rte_eth_dcb_rx_conf *conf;

                        /* DCB requires a fixed number of RX queues */
                        if (nb_rx_q != ETH_DCB_NUM_QUEUES) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_rx_q "
                                                "!= %d\n",
                                                port_id, ETH_DCB_NUM_QUEUES);
                                return (-EINVAL);
                        }
                        conf = &(dev_conf->rx_adv_conf.dcb_rx_conf);
                        if (! (conf->nb_tcs == ETH_4_TCS ||
                               conf->nb_tcs == ETH_8_TCS)) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
                                                "nb_tcs != %d or nb_tcs "
                                                "!= %d\n",
                                                port_id, ETH_4_TCS, ETH_8_TCS);
                                return (-EINVAL);
                        }
                }

                if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
                        const struct rte_eth_dcb_tx_conf *conf;

                        /* DCB requires a fixed number of TX queues */
                        if (nb_tx_q != ETH_DCB_NUM_QUEUES) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_tx_q "
                                                "!= %d\n",
                                                port_id, ETH_DCB_NUM_QUEUES);
                                return (-EINVAL);
                        }
                        conf = &(dev_conf->tx_adv_conf.dcb_tx_conf);
                        if (! (conf->nb_tcs == ETH_4_TCS ||
                               conf->nb_tcs == ETH_8_TCS)) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
                                                "nb_tcs != %d or nb_tcs "
                                                "!= %d\n",
                                                port_id, ETH_4_TCS, ETH_8_TCS);
                                return (-EINVAL);
                        }
                }
        }
        return 0;
}
584
/*
 * Configure an Ethernet device: validate the requested RX/TX queue counts
 * against the driver-reported limits, record the configuration, check the
 * multi-queue mode, (re)allocate the queue-pointer arrays, and finally let
 * the PMD apply the configuration.  Primary process only; the device must
 * be stopped.  Returns 0 on success or a negative errno-style value.
 */
int
rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                      const struct rte_eth_conf *dev_conf)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        int diag;

        /* This function is only safe when called from the primary process
         * in a multi-process setup*/
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        if (port_id >= nb_ports || port_id >= RTE_MAX_ETHPORTS) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-EINVAL);
        }
        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

        /* reconfiguring a running device is not allowed */
        if (dev->data->dev_started) {
                PMD_DEBUG_TRACE(
                    "port %d must be stopped to allow configuration\n", port_id);
                return (-EBUSY);
        }

        /*
         * Check that the numbers of RX and TX queues are not greater
         * than the maximum number of RX and TX queues supported by the
         * configured device.
         */
        (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
        if (nb_rx_q > dev_info.max_rx_queues) {
                PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
                                port_id, nb_rx_q, dev_info.max_rx_queues);
                return (-EINVAL);
        }
        if (nb_rx_q == 0) {
                PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_q == 0\n", port_id);
                return (-EINVAL);
        }

        if (nb_tx_q > dev_info.max_tx_queues) {
                PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
                                port_id, nb_tx_q, dev_info.max_tx_queues);
                return (-EINVAL);
        }
        if (nb_tx_q == 0) {
                PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_q == 0\n", port_id);
                return (-EINVAL);
        }

        /* Copy the dev_conf parameter into the dev structure */
        memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

        /*
         * If jumbo frames are enabled, check that the maximum RX packet
         * length is supported by the configured device.
         */
        if (dev_conf->rxmode.jumbo_frame == 1) {
                if (dev_conf->rxmode.max_rx_pkt_len >
                    dev_info.max_rx_pktlen) {
                        PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
                                " > max valid value %u\n",
                                port_id,
                                (unsigned)dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned)dev_info.max_rx_pktlen);
                        return (-EINVAL);
                }
                else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
                        PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
                                " < min valid value %u\n",
                                port_id,
                                (unsigned)dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned)ETHER_MIN_LEN);
                        return (-EINVAL);
                }
        } else
                /* Use default value */
                dev->data->dev_conf.rxmode.max_rx_pkt_len = ETHER_MAX_LEN;

        /* multiple queue mode checking */
        diag = rte_eth_dev_check_mq_mode(port_id, nb_rx_q, nb_tx_q, dev_conf);
        if (diag != 0) {
                PMD_DEBUG_TRACE("port%d rte_eth_dev_check_mq_mode = %d\n",
                                port_id, diag);
                return diag;
        }

        /*
         * Setup new number of RX/TX queues and reconfigure device.
         */
        diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
        if (diag != 0) {
                PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
                                port_id, diag);
                return diag;
        }

        diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
        if (diag != 0) {
                PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
                                port_id, diag);
                /* roll back the RX queue array on failure */
                rte_eth_dev_rx_queue_config(dev, 0);
                return diag;
        }

        diag = (*dev->dev_ops->dev_configure)(dev);
        if (diag != 0) {
                PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
                                port_id, diag);
                /* roll back both queue arrays on failure */
                rte_eth_dev_rx_queue_config(dev, 0);
                rte_eth_dev_tx_queue_config(dev, 0);
                return diag;
        }

        return 0;
}
704
705 static void
706 rte_eth_dev_config_restore(uint8_t port_id)
707 {
708         struct rte_eth_dev *dev;
709         struct rte_eth_dev_info dev_info;
710         struct ether_addr addr;
711         uint16_t i;
712         uint32_t pool = 0;
713
714         dev = &rte_eth_devices[port_id];
715
716         rte_eth_dev_info_get(port_id, &dev_info);
717
718         if (RTE_ETH_DEV_SRIOV(dev).active)
719                 pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx;
720
721         /* replay MAC address configuration */
722         for (i = 0; i < dev_info.max_mac_addrs; i++) {
723                 addr = dev->data->mac_addrs[i];
724
725                 /* skip zero address */
726                 if (is_zero_ether_addr(&addr))
727                         continue;
728
729                 /* add address to the hardware */
730                 if  (*dev->dev_ops->mac_addr_add)
731                         (*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool);
732                 else {
733                         PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
734                                         port_id);
735                         /* exit the loop but not return an error */
736                         break;
737                 }
738         }
739
740         /* replay promiscuous configuration */
741         if (rte_eth_promiscuous_get(port_id) == 1)
742                 rte_eth_promiscuous_enable(port_id);
743         else if (rte_eth_promiscuous_get(port_id) == 0)
744                 rte_eth_promiscuous_disable(port_id);
745
746         /* replay allmulticast configuration */
747         if (rte_eth_allmulticast_get(port_id) == 1)
748                 rte_eth_allmulticast_enable(port_id);
749         else if (rte_eth_allmulticast_get(port_id) == 0)
750                 rte_eth_allmulticast_disable(port_id);
751 }
752
/*
 * Start an Ethernet device: invoke the PMD dev_start callback and, on
 * success, replay the configuration (MAC addresses, promiscuous and
 * allmulticast state) recorded in dev->data while the port was stopped.
 * Returns 0 on success, negative errno-style value on error.
 */
int
rte_eth_dev_start(uint8_t port_id)
{
	struct rte_eth_dev *dev;
	int diag;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup*/
	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%" PRIu8 "\n", port_id);
		return (-EINVAL);
	}
	dev = &rte_eth_devices[port_id];

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	/* Starting an already-started port is a no-op, not an error. */
	if (dev->data->dev_started != 0) {
		PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
			" already started\n",
			port_id);
		return (0);
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	/* Re-apply the saved configuration now that the port is running. */
	rte_eth_dev_config_restore(port_id);

	return 0;
}
788
/*
 * Stop an Ethernet device. The started flag is cleared *before* the PMD
 * dev_stop callback runs, so datapath users see the port as down as early
 * as possible. Stopping an already-stopped port is a no-op.
 */
void
rte_eth_dev_stop(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup*/
	PROC_PRIMARY_OR_RET();

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%" PRIu8 "\n", port_id);
		return;
	}
	dev = &rte_eth_devices[port_id];

	FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
			" already stopped\n",
			port_id);
		return;
	}

	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_stop)(dev);
}
816
/*
 * Close an Ethernet device: mark it as stopped and invoke the PMD
 * dev_close callback to release its resources. Unlike rte_eth_dev_stop(),
 * no already-stopped check is made here; close is forwarded unconditionally.
 */
void
rte_eth_dev_close(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup*/
	PROC_PRIMARY_OR_RET();

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return;
	}

	dev = &rte_eth_devices[port_id];

	FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_close)(dev);
}
837
/*
 * Configure one RX queue of a port. Validates the port/queue ids, refuses
 * reconfiguration while the port is started, then checks that the mbufs in
 * the supplied mempool are large enough for the device's minimum RX buffer
 * before delegating to the PMD rx_queue_setup callback.
 * Returns 0 on success, negative errno-style value on error.
 */
int
rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
		       uint16_t nb_rx_desc, unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf,
		       struct rte_mempool *mp)
{
	struct rte_eth_dev *dev;
	struct rte_pktmbuf_pool_private *mbp_priv;
	struct rte_eth_dev_info dev_info;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup*/
	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-EINVAL);
	}
	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
		return (-EINVAL);
	}

	/* Queues may only be (re)configured on a stopped port. */
	if (dev->data->dev_started) {
		PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n", port_id);
		return -EBUSY;
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);

	/*
	 * Check the size of the mbuf data buffer.
	 * This value must be provided in the private data of the memory pool.
	 * First check that the memory pool has a valid private data.
	 */
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);
	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
		PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
				mp->name, (int) mp->private_data_size,
				(int) sizeof(struct rte_pktmbuf_pool_private));
		return (-ENOSPC);
	}
	/* The usable data room is what remains after the fixed headroom. */
	mbp_priv = rte_mempool_get_priv(mp);
	if ((uint32_t) (mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM) <
	    dev_info.min_rx_bufsize) {
		PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
				"(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
				"=%d)\n",
				mp->name,
				(int)mbp_priv->mbuf_data_room_size,
				(int)(RTE_PKTMBUF_HEADROOM +
				      dev_info.min_rx_bufsize),
				(int)RTE_PKTMBUF_HEADROOM,
				(int)dev_info.min_rx_bufsize);
		return (-EINVAL);
	}

	return (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
					       socket_id, rx_conf, mp);
}
901
/*
 * Configure one TX queue of a port. Validates the port/queue ids and
 * refuses reconfiguration while the port is started, then delegates to
 * the PMD tx_queue_setup callback.
 * Returns 0 on success, negative errno-style value on error.
 */
int
rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
		       uint16_t nb_tx_desc, unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf)
{
	struct rte_eth_dev *dev;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup*/
	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

	if (port_id >= RTE_MAX_ETHPORTS || port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-EINVAL);
	}
	dev = &rte_eth_devices[port_id];
	if (tx_queue_id >= dev->data->nb_tx_queues) {
		PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
		return (-EINVAL);
	}

	/* Queues may only be (re)configured on a stopped port. */
	if (dev->data->dev_started) {
		PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n", port_id);
		return -EBUSY;
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
	return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
					       socket_id, tx_conf);
}
933
934 void
935 rte_eth_promiscuous_enable(uint8_t port_id)
936 {
937         struct rte_eth_dev *dev;
938
939         if (port_id >= nb_ports) {
940                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
941                 return;
942         }
943         dev = &rte_eth_devices[port_id];
944
945         FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
946         (*dev->dev_ops->promiscuous_enable)(dev);
947         dev->data->promiscuous = 1;
948 }
949
950 void
951 rte_eth_promiscuous_disable(uint8_t port_id)
952 {
953         struct rte_eth_dev *dev;
954
955         if (port_id >= nb_ports) {
956                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
957                 return;
958         }
959         dev = &rte_eth_devices[port_id];
960
961         FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
962         dev->data->promiscuous = 0;
963         (*dev->dev_ops->promiscuous_disable)(dev);
964 }
965
966 int
967 rte_eth_promiscuous_get(uint8_t port_id)
968 {
969         struct rte_eth_dev *dev;
970
971         if (port_id >= nb_ports) {
972                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
973                 return -1;
974         }
975
976         dev = &rte_eth_devices[port_id];
977         return dev->data->promiscuous;
978 }
979
980 void
981 rte_eth_allmulticast_enable(uint8_t port_id)
982 {
983         struct rte_eth_dev *dev;
984
985         if (port_id >= nb_ports) {
986                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
987                 return;
988         }
989         dev = &rte_eth_devices[port_id];
990
991         FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
992         (*dev->dev_ops->allmulticast_enable)(dev);
993         dev->data->all_multicast = 1;
994 }
995
996 void
997 rte_eth_allmulticast_disable(uint8_t port_id)
998 {
999         struct rte_eth_dev *dev;
1000
1001         if (port_id >= nb_ports) {
1002                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1003                 return;
1004         }
1005         dev = &rte_eth_devices[port_id];
1006
1007         FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
1008         dev->data->all_multicast = 0;
1009         (*dev->dev_ops->allmulticast_disable)(dev);
1010 }
1011
1012 int
1013 rte_eth_allmulticast_get(uint8_t port_id)
1014 {
1015         struct rte_eth_dev *dev;
1016
1017         if (port_id >= nb_ports) {
1018                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1019                 return -1;
1020         }
1021
1022         dev = &rte_eth_devices[port_id];
1023         return dev->data->all_multicast;
1024 }
1025
/*
 * Atomically read the shared link status into *link. dev_link may be
 * updated concurrently by the link-state-change interrupt handler, so a
 * plain struct assignment could observe a torn value; the whole struct is
 * therefore transferred with a single 64-bit compare-and-set.
 * Returns 0 on success, -1 if a concurrent writer changed dev_link between
 * the read of the expected value and the cmpset (caller may retry).
 *
 * NOTE(review): relies on struct rte_eth_link fitting in 64 bits with
 * suitable alignment for the (uint64_t *) casts -- confirm in rte_ethdev.h.
 */
static inline int
rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
1039
1040 void
1041 rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
1042 {
1043         struct rte_eth_dev *dev;
1044
1045         if (port_id >= nb_ports) {
1046                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1047                 return;
1048         }
1049         dev = &rte_eth_devices[port_id];
1050         FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1051
1052         if (dev->data->dev_conf.intr_conf.lsc != 0)
1053                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1054         else {
1055                 (*dev->dev_ops->link_update)(dev, 1);
1056                 *eth_link = dev->data->dev_link;
1057         }
1058 }
1059
1060 void
1061 rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
1062 {
1063         struct rte_eth_dev *dev;
1064
1065         if (port_id >= nb_ports) {
1066                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1067                 return;
1068         }
1069         dev = &rte_eth_devices[port_id];
1070         FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1071
1072         if (dev->data->dev_conf.intr_conf.lsc != 0)
1073                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1074         else {
1075                 (*dev->dev_ops->link_update)(dev, 0);
1076                 *eth_link = dev->data->dev_link;
1077         }
1078 }
1079
1080 void
1081 rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
1082 {
1083         struct rte_eth_dev *dev;
1084
1085         if (port_id >= nb_ports) {
1086                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1087                 return;
1088         }
1089         dev = &rte_eth_devices[port_id];
1090         memset(stats, 0, sizeof(*stats));
1091
1092         FUNC_PTR_OR_RET(*dev->dev_ops->stats_get);
1093         (*dev->dev_ops->stats_get)(dev, stats);
1094         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1095 }
1096
1097 void
1098 rte_eth_stats_reset(uint8_t port_id)
1099 {
1100         struct rte_eth_dev *dev;
1101
1102         if (port_id >= nb_ports) {
1103                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1104                 return;
1105         }
1106         dev = &rte_eth_devices[port_id];
1107
1108         FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
1109         (*dev->dev_ops->stats_reset)(dev);
1110 }
1111
1112
1113 static int
1114 set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
1115                 uint8_t is_rx)
1116 {
1117         struct rte_eth_dev *dev;
1118
1119         if (port_id >= nb_ports) {
1120                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1121                 return -ENODEV;
1122         }
1123         dev = &rte_eth_devices[port_id];
1124
1125         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
1126         return (*dev->dev_ops->queue_stats_mapping_set)
1127                         (dev, queue_id, stat_idx, is_rx);
1128 }
1129
1130
1131 int
1132 rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
1133                 uint8_t stat_idx)
1134 {
1135         return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
1136                         STAT_QMAP_TX);
1137 }
1138
1139
1140 int
1141 rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
1142                 uint8_t stat_idx)
1143 {
1144         return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
1145                         STAT_QMAP_RX);
1146 }
1147
1148
/*
 * Fill *dev_info with the characteristics of a port. The generic fields
 * that PMDs may not set (offload capabilities, if_index) are zeroed first,
 * then the PMD dev_infos_get callback fills in its own values, and finally
 * the generic layer adds the PCI device and driver name.
 */
void
rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
{
	struct rte_eth_dev *dev;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return;
	}
	dev = &rte_eth_devices[port_id];

	/* Default device offload capabilities to zero */
	dev_info->rx_offload_capa = 0;
	dev_info->tx_offload_capa = 0;
	dev_info->if_index = 0;
	FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);
	dev_info->pci_dev = dev->pci_dev;
	/* dev->driver may be NULL (e.g. virtual devices); guard the deref. */
	if (dev->driver)
		dev_info->driver_name = dev->driver->pci_drv.name;
}
1170
1171 void
1172 rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
1173 {
1174         struct rte_eth_dev *dev;
1175
1176         if (port_id >= nb_ports) {
1177                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1178                 return;
1179         }
1180         dev = &rte_eth_devices[port_id];
1181         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
1182 }
1183
1184 int
1185 rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
1186 {
1187         struct rte_eth_dev *dev;
1188
1189         if (port_id >= nb_ports) {
1190                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1191                 return (-ENODEV);
1192         }
1193         dev = &rte_eth_devices[port_id];
1194         if (! (dev->data->dev_conf.rxmode.hw_vlan_filter)) {
1195                 PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
1196                 return (-ENOSYS);
1197         }
1198
1199         if (vlan_id > 4095) {
1200                 PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
1201                                 port_id, (unsigned) vlan_id);
1202                 return (-EINVAL);
1203         }
1204         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
1205         (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
1206         return (0);
1207 }
1208
1209 int
1210 rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on)
1211 {
1212         struct rte_eth_dev *dev;
1213
1214         if (port_id >= nb_ports) {
1215                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1216                 return (-ENODEV);
1217         }
1218
1219         dev = &rte_eth_devices[port_id];
1220         if (rx_queue_id >= dev->data->nb_rx_queues) {
1221                 PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", port_id);
1222                 return (-EINVAL);
1223         }
1224
1225         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
1226         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
1227
1228         return (0);
1229 }
1230
1231 int
1232 rte_eth_dev_set_vlan_ether_type(uint8_t port_id, uint16_t tpid)
1233 {
1234         struct rte_eth_dev *dev;
1235
1236         if (port_id >= nb_ports) {
1237                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1238                 return (-ENODEV);
1239         }
1240
1241         dev = &rte_eth_devices[port_id];
1242         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
1243         (*dev->dev_ops->vlan_tpid_set)(dev, tpid);
1244
1245         return (0);
1246 }
1247
/*
 * Apply a new set of VLAN offload flags (strip/filter/extend) to a port.
 * The requested bitmask is diffed against the current RX mode config;
 * only the flags that actually changed are pushed to the PMD, and the
 * shared config is updated to match. A request that changes nothing is
 * a successful no-op.
 * Returns 0 on success, negative errno-style value on error.
 */
int
rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
{
	struct rte_eth_dev *dev;
	int ret = 0;
	int mask = 0;
	int cur, org = 0;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-ENODEV);
	}

	dev = &rte_eth_devices[port_id];

	/*check which option changed by application*/
	cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
	org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
	if (cur != org){
		dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
		mask |= ETH_VLAN_STRIP_MASK;
	}

	cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
	org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
	if (cur != org){
		dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
		mask |= ETH_VLAN_FILTER_MASK;
	}

	cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
	org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
	if (cur != org){
		dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
		mask |= ETH_VLAN_EXTEND_MASK;
	}

	/*no change*/
	if(mask == 0)
		return ret;

	/* NOTE(review): the config above is already updated before the PMD
	 * call; a PMD failure is not rolled back (callback returns void). */
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
	(*dev->dev_ops->vlan_offload_set)(dev, mask);

	return ret;
}
1294
1295 int
1296 rte_eth_dev_get_vlan_offload(uint8_t port_id)
1297 {
1298         struct rte_eth_dev *dev;
1299         int ret = 0;
1300
1301         if (port_id >= nb_ports) {
1302                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1303                 return (-ENODEV);
1304         }
1305
1306         dev = &rte_eth_devices[port_id];
1307
1308         if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1309                 ret |= ETH_VLAN_STRIP_OFFLOAD ;
1310
1311         if (dev->data->dev_conf.rxmode.hw_vlan_filter)
1312                 ret |= ETH_VLAN_FILTER_OFFLOAD ;
1313
1314         if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1315                 ret |= ETH_VLAN_EXTEND_OFFLOAD ;
1316
1317         return ret;
1318 }
1319
1320
/*
 * Add a flow-director signature filter steering matching packets to the
 * given RX queue. The port must be configured in signature FDIR mode,
 * and SCTP/no-l4 filters must not carry L4 port values (they would be
 * meaningless for those l4 types).
 * Returns 0 on success, negative errno-style value on error.
 */
int
rte_eth_dev_fdir_add_signature_filter(uint8_t port_id,
				      struct rte_fdir_filter *fdir_filter,
				      uint8_t queue)
{
	struct rte_eth_dev *dev;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-ENODEV);
	}

	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
		PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
				port_id, dev->data->dev_conf.fdir_conf.mode);
		return (-ENOSYS);
	}

	if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
	     || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
	    && (fdir_filter->port_src || fdir_filter->port_dst)) {
		PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
				"None l4type, source & destinations ports " \
				"should be null!\n");
		return (-EINVAL);
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_signature_filter, -ENOTSUP);
	return (*dev->dev_ops->fdir_add_signature_filter)(dev, fdir_filter,
								queue);
}
1354
/*
 * Update an existing flow-director signature filter. Same validation as
 * the add path: signature FDIR mode required, and SCTP/no-l4 filters must
 * not carry L4 port values.
 * Returns 0 on success, negative errno-style value on error.
 */
int
rte_eth_dev_fdir_update_signature_filter(uint8_t port_id,
					 struct rte_fdir_filter *fdir_filter,
					 uint8_t queue)
{
	struct rte_eth_dev *dev;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-ENODEV);
	}

	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
		PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
				port_id, dev->data->dev_conf.fdir_conf.mode);
		return (-ENOSYS);
	}

	if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
	     || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
	    && (fdir_filter->port_src || fdir_filter->port_dst)) {
		PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
				"None l4type, source & destinations ports " \
				"should be null!\n");
		return (-EINVAL);
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_signature_filter, -ENOTSUP);
	return (*dev->dev_ops->fdir_update_signature_filter)(dev, fdir_filter,
								queue);

}
1389
/*
 * Remove a flow-director signature filter. Same validation as the add
 * path: signature FDIR mode required, and SCTP/no-l4 filters must not
 * carry L4 port values.
 * Returns 0 on success, negative errno-style value on error.
 */
int
rte_eth_dev_fdir_remove_signature_filter(uint8_t port_id,
					 struct rte_fdir_filter *fdir_filter)
{
	struct rte_eth_dev *dev;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-ENODEV);
	}

	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
		PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
				port_id, dev->data->dev_conf.fdir_conf.mode);
		return (-ENOSYS);
	}

	if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
	     || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
	    && (fdir_filter->port_src || fdir_filter->port_dst)) {
		PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
				"None l4type source & destinations ports " \
				"should be null!\n");
		return (-EINVAL);
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_signature_filter, -ENOTSUP);
	return (*dev->dev_ops->fdir_remove_signature_filter)(dev, fdir_filter);
}
1421
1422 int
1423 rte_eth_dev_fdir_get_infos(uint8_t port_id, struct rte_eth_fdir *fdir)
1424 {
1425         struct rte_eth_dev *dev;
1426
1427         if (port_id >= nb_ports) {
1428                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1429                 return (-ENODEV);
1430         }
1431
1432         dev = &rte_eth_devices[port_id];
1433         if (! (dev->data->dev_conf.fdir_conf.mode)) {
1434                 PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
1435                 return (-ENOSYS);
1436         }
1437
1438         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_infos_get, -ENOTSUP);
1439
1440         (*dev->dev_ops->fdir_infos_get)(dev, fdir);
1441         return (0);
1442 }
1443
/*
 * Add a flow-director perfect filter. The port must be configured in
 * perfect FDIR mode; SCTP/no-l4 filters must not carry L4 port values,
 * and IPv6 is rejected because perfect filters do not support it yet.
 * Returns 0 on success, negative errno-style value on error.
 */
int
rte_eth_dev_fdir_add_perfect_filter(uint8_t port_id,
				    struct rte_fdir_filter *fdir_filter,
				    uint16_t soft_id, uint8_t queue,
				    uint8_t drop)
{
	struct rte_eth_dev *dev;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-ENODEV);
	}

	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
		PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
				port_id, dev->data->dev_conf.fdir_conf.mode);
		return (-ENOSYS);
	}

	if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
	     || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
	    && (fdir_filter->port_src || fdir_filter->port_dst)) {
		PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
				"None l4type, source & destinations ports " \
				"should be null!\n");
		return (-EINVAL);
	}

	/* For now IPv6 is not supported with perfect filter */
	if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
		return (-ENOTSUP);

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_perfect_filter, -ENOTSUP);
	return (*dev->dev_ops->fdir_add_perfect_filter)(dev, fdir_filter,
								soft_id, queue,
								drop);
}
1483
/*
 * Update an existing flow-director perfect filter. Same validation as the
 * add path: perfect FDIR mode required, no L4 ports for SCTP/no-l4
 * filters, and IPv6 not supported.
 * Returns 0 on success, negative errno-style value on error.
 */
int
rte_eth_dev_fdir_update_perfect_filter(uint8_t port_id,
				       struct rte_fdir_filter *fdir_filter,
				       uint16_t soft_id, uint8_t queue,
				       uint8_t drop)
{
	struct rte_eth_dev *dev;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-ENODEV);
	}

	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
		PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
				port_id, dev->data->dev_conf.fdir_conf.mode);
		return (-ENOSYS);
	}

	if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
	     || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
	    && (fdir_filter->port_src || fdir_filter->port_dst)) {
		PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
				"None l4type, source & destinations ports " \
				"should be null!\n");
		return (-EINVAL);
	}

	/* For now IPv6 is not supported with perfect filter */
	if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
		return (-ENOTSUP);

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_perfect_filter, -ENOTSUP);
	return (*dev->dev_ops->fdir_update_perfect_filter)(dev, fdir_filter,
							soft_id, queue, drop);
}
1522
/*
 * Remove a flow-director perfect filter identified by soft_id. Same
 * validation as the add path: perfect FDIR mode required, no L4 ports
 * for SCTP/no-l4 filters, and IPv6 not supported.
 * Returns 0 on success, negative errno-style value on error.
 */
int
rte_eth_dev_fdir_remove_perfect_filter(uint8_t port_id,
				       struct rte_fdir_filter *fdir_filter,
				       uint16_t soft_id)
{
	struct rte_eth_dev *dev;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-ENODEV);
	}

	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
		PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
				port_id, dev->data->dev_conf.fdir_conf.mode);
		return (-ENOSYS);
	}

	if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
	     || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
	    && (fdir_filter->port_src || fdir_filter->port_dst)) {
		PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
				"None l4type, source & destinations ports " \
				"should be null!\n");
		return (-EINVAL);
	}

	/* For now IPv6 is not supported with perfect filter */
	if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
		return (-ENOTSUP);

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_perfect_filter, -ENOTSUP);
	return (*dev->dev_ops->fdir_remove_perfect_filter)(dev, fdir_filter,
								soft_id);
}
1560
1561 int
1562 rte_eth_dev_fdir_set_masks(uint8_t port_id, struct rte_fdir_masks *fdir_mask)
1563 {
1564         struct rte_eth_dev *dev;
1565
1566         if (port_id >= nb_ports) {
1567                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1568                 return (-ENODEV);
1569         }
1570
1571         dev = &rte_eth_devices[port_id];
1572         if (! (dev->data->dev_conf.fdir_conf.mode)) {
1573                 PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
1574                 return (-ENOSYS);
1575         }
1576
1577         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_set_masks, -ENOTSUP);
1578         return (*dev->dev_ops->fdir_set_masks)(dev, fdir_mask);
1579 }
1580
1581 int
1582 rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
1583 {
1584         struct rte_eth_dev *dev;
1585
1586         if (port_id >= nb_ports) {
1587                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1588                 return (-ENODEV);
1589         }
1590
1591         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
1592                 PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
1593                 return (-EINVAL);
1594         }
1595
1596         dev = &rte_eth_devices[port_id];
1597         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
1598         return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
1599 }
1600
1601 int
1602 rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc_conf)
1603 {
1604         struct rte_eth_dev *dev;
1605
1606         if (port_id >= nb_ports) {
1607                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1608                 return (-ENODEV);
1609         }
1610
1611         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
1612                 PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
1613                 return (-EINVAL);
1614         }
1615
1616         dev = &rte_eth_devices[port_id];
1617         /* High water, low water validation are device specific */
1618         if  (*dev->dev_ops->priority_flow_ctrl_set)
1619                 return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
1620         return (-ENOTSUP);
1621 }
1622
1623 int
1624 rte_eth_dev_rss_reta_update(uint8_t port_id, struct rte_eth_rss_reta *reta_conf)
1625 {
1626         struct rte_eth_dev *dev;
1627         uint16_t max_rxq;
1628         uint8_t i,j;
1629
1630         if (port_id >= nb_ports) {
1631                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1632                 return (-ENODEV);
1633         }
1634
1635         /* Invalid mask bit(s) setting */
1636         if ((reta_conf->mask_lo == 0) && (reta_conf->mask_hi == 0)) {
1637                 PMD_DEBUG_TRACE("Invalid update mask bits for port=%d\n",port_id);
1638                 return (-EINVAL);
1639         }
1640
1641         dev = &rte_eth_devices[port_id];
1642         max_rxq = (dev->data->nb_rx_queues <= ETH_RSS_RETA_MAX_QUEUE) ?
1643                 dev->data->nb_rx_queues : ETH_RSS_RETA_MAX_QUEUE;
1644         if (reta_conf->mask_lo != 0) {
1645                 for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES/2; i++) {
1646                         if ((reta_conf->mask_lo & (1ULL << i)) &&
1647                                 (reta_conf->reta[i] >= max_rxq)) {
1648                                 PMD_DEBUG_TRACE("RETA hash index output"
1649                                         "configration for port=%d,invalid"
1650                                         "queue=%d\n",port_id,reta_conf->reta[i]);
1651
1652                                 return (-EINVAL);
1653                         }
1654                 }
1655         }
1656
1657         if (reta_conf->mask_hi != 0) {
1658                 for (i = 0; i< ETH_RSS_RETA_NUM_ENTRIES/2; i++) {
1659                         j = (uint8_t)(i + ETH_RSS_RETA_NUM_ENTRIES/2);
1660
1661                         /* Check if the max entry >= 128 */
1662                         if ((reta_conf->mask_hi & (1ULL << i)) &&
1663                                 (reta_conf->reta[j] >= max_rxq)) {
1664                                 PMD_DEBUG_TRACE("RETA hash index output"
1665                                         "configration for port=%d,invalid"
1666                                         "queue=%d\n",port_id,reta_conf->reta[j]);
1667
1668                                 return (-EINVAL);
1669                         }
1670                 }
1671         }
1672
1673         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
1674         return (*dev->dev_ops->reta_update)(dev, reta_conf);
1675 }
1676
1677 int
1678 rte_eth_dev_rss_reta_query(uint8_t port_id, struct rte_eth_rss_reta *reta_conf)
1679 {
1680         struct rte_eth_dev *dev;
1681
1682         if (port_id >= nb_ports) {
1683                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1684                 return (-ENODEV);
1685         }
1686
1687         if((reta_conf->mask_lo == 0) && (reta_conf->mask_hi == 0)) {
1688                 PMD_DEBUG_TRACE("Invalid update mask bits for the port=%d\n",port_id);
1689                 return (-EINVAL);
1690         }
1691
1692         dev = &rte_eth_devices[port_id];
1693         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
1694         return (*dev->dev_ops->reta_query)(dev, reta_conf);
1695 }
1696
1697 int
1698 rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf)
1699 {
1700         struct rte_eth_dev *dev;
1701         uint16_t rss_hash_protos;
1702
1703         if (port_id >= nb_ports) {
1704                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1705                 return (-ENODEV);
1706         }
1707         rss_hash_protos = rss_conf->rss_hf;
1708         if ((rss_hash_protos != 0) &&
1709             ((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) {
1710                 PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%x\n",
1711                                 rss_hash_protos);
1712                 return (-EINVAL);
1713         }
1714         dev = &rte_eth_devices[port_id];
1715         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
1716         return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
1717 }
1718
1719 int
1720 rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
1721                               struct rte_eth_rss_conf *rss_conf)
1722 {
1723         struct rte_eth_dev *dev;
1724
1725         if (port_id >= nb_ports) {
1726                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1727                 return (-ENODEV);
1728         }
1729         dev = &rte_eth_devices[port_id];
1730         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
1731         return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
1732 }
1733
1734 int
1735 rte_eth_led_on(uint8_t port_id)
1736 {
1737         struct rte_eth_dev *dev;
1738
1739         if (port_id >= nb_ports) {
1740                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1741                 return (-ENODEV);
1742         }
1743
1744         dev = &rte_eth_devices[port_id];
1745         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
1746         return ((*dev->dev_ops->dev_led_on)(dev));
1747 }
1748
1749 int
1750 rte_eth_led_off(uint8_t port_id)
1751 {
1752         struct rte_eth_dev *dev;
1753
1754         if (port_id >= nb_ports) {
1755                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1756                 return (-ENODEV);
1757         }
1758
1759         dev = &rte_eth_devices[port_id];
1760         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
1761         return ((*dev->dev_ops->dev_led_off)(dev));
1762 }
1763
1764 /*
1765  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
1766  * an empty spot.
1767  */
1768 static inline int
1769 get_mac_addr_index(uint8_t port_id, struct ether_addr *addr)
1770 {
1771         struct rte_eth_dev_info dev_info;
1772         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1773         unsigned i;
1774
1775         rte_eth_dev_info_get(port_id, &dev_info);
1776
1777         for (i = 0; i < dev_info.max_mac_addrs; i++)
1778                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
1779                         return i;
1780
1781         return -1;
1782 }
1783
1784 static struct ether_addr null_mac_addr = {{0, 0, 0, 0, 0, 0}};
1785
/*
 * Add a MAC address to a port and associate it with a VMDq pool.
 *
 * The address is first validated (non-zero, pool < ETH_64_POOLS).  If
 * the address is not yet in the port's MAC table, a free (all-zero)
 * slot is claimed for it; if it is already present for the same pool,
 * the call is a no-op.  On success the driver is told about the new
 * (address, slot, pool) binding and the software shadow copies
 * (mac_addrs[] and mac_pool_sel[]) are updated to match.
 *
 * Returns 0 on success, -ENODEV/-EINVAL on bad arguments, -ENOTSUP if
 * the driver lacks mac_addr_add, -ENOSPC when the MAC table is full.
 */
int
rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
			uint32_t pool)
{
	struct rte_eth_dev *dev;
	int index;
	uint64_t pool_mask;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-ENODEV);
	}
	dev = &rte_eth_devices[port_id];
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);

	/* The all-zero address is reserved to mark free table slots. */
	if (is_zero_ether_addr(addr)) {
		PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
			port_id);
		return (-EINVAL);
	}
	if (pool >= ETH_64_POOLS) {
		PMD_DEBUG_TRACE("pool id must be 0-%d\n",ETH_64_POOLS - 1);
		return (-EINVAL);
	}

	index = get_mac_addr_index(port_id, addr);
	if (index < 0) {
		/* Address not present yet: claim a free (all-zero) slot. */
		index = get_mac_addr_index(port_id, &null_mac_addr);
		if (index < 0) {
			PMD_DEBUG_TRACE("port %d: MAC address array full\n",
				port_id);
			return (-ENOSPC);
		}
	} else {
		pool_mask = dev->data->mac_pool_sel[index];

		/* Check if both MAC address and pool are already there; do nothing */
		if (pool_mask & (1ULL << pool))
			return 0;
	}

	/* Update NIC */
	(*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);

	/* Update address in NIC data structure */
	ether_addr_copy(addr, &dev->data->mac_addrs[index]);

	/* Update pool bitmap in NIC data structure */
	dev->data->mac_pool_sel[index] |= (1ULL << pool);

	return 0;
}
1838
1839 int
1840 rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
1841 {
1842         struct rte_eth_dev *dev;
1843         int index;
1844
1845         if (port_id >= nb_ports) {
1846                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1847                 return (-ENODEV);
1848         }
1849         dev = &rte_eth_devices[port_id];
1850         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
1851
1852         index = get_mac_addr_index(port_id, addr);
1853         if (index == 0) {
1854                 PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
1855                 return (-EADDRINUSE);
1856         } else if (index < 0)
1857                 return 0;  /* Do nothing if address wasn't found */
1858
1859         /* Update NIC */
1860         (*dev->dev_ops->mac_addr_remove)(dev, index);
1861
1862         /* Update address in NIC data structure */
1863         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
1864
1865         return 0;
1866 }
1867
1868 int
1869 rte_eth_dev_set_vf_rxmode(uint8_t port_id,  uint16_t vf,
1870                                 uint16_t rx_mode, uint8_t on)
1871 {
1872         uint16_t num_vfs;
1873         struct rte_eth_dev *dev;
1874         struct rte_eth_dev_info dev_info;
1875
1876         if (port_id >= nb_ports) {
1877                 PMD_DEBUG_TRACE("set VF RX mode:Invalid port_id=%d\n",
1878                                 port_id);
1879                 return (-ENODEV);
1880         }
1881
1882         dev = &rte_eth_devices[port_id];
1883         rte_eth_dev_info_get(port_id, &dev_info);
1884
1885         num_vfs = dev_info.max_vfs;
1886         if (vf > num_vfs)
1887         {
1888                 PMD_DEBUG_TRACE("set VF RX mode:invalid VF id %d\n", vf);
1889                 return (-EINVAL);
1890         }
1891         if (rx_mode == 0)
1892         {
1893                 PMD_DEBUG_TRACE("set VF RX mode:mode mask ca not be zero\n");
1894                 return (-EINVAL);
1895         }
1896         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP);
1897         return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on);
1898 }
1899
1900 /*
1901  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
1902  * an empty spot.
1903  */
1904 static inline int
1905 get_hash_mac_addr_index(uint8_t port_id, struct ether_addr *addr)
1906 {
1907         struct rte_eth_dev_info dev_info;
1908         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1909         unsigned i;
1910
1911         rte_eth_dev_info_get(port_id, &dev_info);
1912         if (!dev->data->hash_mac_addrs)
1913                 return -1;
1914
1915         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
1916                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
1917                         ETHER_ADDR_LEN) == 0)
1918                         return i;
1919
1920         return -1;
1921 }
1922
1923 int
1924 rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
1925                                 uint8_t on)
1926 {
1927         int index;
1928         int ret;
1929         struct rte_eth_dev *dev;
1930
1931         if (port_id >= nb_ports) {
1932                 PMD_DEBUG_TRACE("unicast hash setting:Invalid port_id=%d\n",
1933                         port_id);
1934                 return (-ENODEV);
1935         }
1936
1937         dev = &rte_eth_devices[port_id];
1938         if (is_zero_ether_addr(addr)) {
1939                 PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
1940                         port_id);
1941                 return (-EINVAL);
1942         }
1943
1944         index = get_hash_mac_addr_index(port_id, addr);
1945         /* Check if it's already there, and do nothing */
1946         if ((index >= 0) && (on))
1947                 return 0;
1948
1949         if (index < 0) {
1950                 if (!on) {
1951                         PMD_DEBUG_TRACE("port %d: the MAC address was not"
1952                                 "set in UTA\n", port_id);
1953                         return (-EINVAL);
1954                 }
1955
1956                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
1957                 if (index < 0) {
1958                         PMD_DEBUG_TRACE("port %d: MAC address array full\n",
1959                                         port_id);
1960                         return (-ENOSPC);
1961                 }
1962         }
1963
1964         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
1965         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
1966         if (ret == 0) {
1967                 /* Update address in NIC data structure */
1968                 if (on)
1969                         ether_addr_copy(addr,
1970                                         &dev->data->hash_mac_addrs[index]);
1971                 else
1972                         ether_addr_copy(&null_mac_addr,
1973                                         &dev->data->hash_mac_addrs[index]);
1974         }
1975
1976         return ret;
1977 }
1978
1979 int
1980 rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
1981 {
1982         struct rte_eth_dev *dev;
1983
1984         if (port_id >= nb_ports) {
1985                 PMD_DEBUG_TRACE("unicast hash setting:Invalid port_id=%d\n",
1986                         port_id);
1987                 return (-ENODEV);
1988         }
1989
1990         dev = &rte_eth_devices[port_id];
1991
1992         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
1993         return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
1994 }
1995
1996 int
1997 rte_eth_dev_set_vf_rx(uint8_t port_id,uint16_t vf, uint8_t on)
1998 {
1999         uint16_t num_vfs;
2000         struct rte_eth_dev *dev;
2001         struct rte_eth_dev_info dev_info;
2002
2003         if (port_id >= nb_ports) {
2004                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2005                 return (-ENODEV);
2006         }
2007
2008         dev = &rte_eth_devices[port_id];
2009         rte_eth_dev_info_get(port_id, &dev_info);
2010
2011         num_vfs = dev_info.max_vfs;
2012         if (vf > num_vfs)
2013         {
2014                 PMD_DEBUG_TRACE("port %d: invalid vf id\n", port_id);
2015                 return (-EINVAL);
2016         }
2017
2018         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP);
2019         return (*dev->dev_ops->set_vf_rx)(dev, vf,on);
2020 }
2021
2022 int
2023 rte_eth_dev_set_vf_tx(uint8_t port_id,uint16_t vf, uint8_t on)
2024 {
2025         uint16_t num_vfs;
2026         struct rte_eth_dev *dev;
2027         struct rte_eth_dev_info dev_info;
2028
2029         if (port_id >= nb_ports) {
2030                 PMD_DEBUG_TRACE("set pool tx:Invalid port_id=%d\n", port_id);
2031                 return (-ENODEV);
2032         }
2033
2034         dev = &rte_eth_devices[port_id];
2035         rte_eth_dev_info_get(port_id, &dev_info);
2036
2037         num_vfs = dev_info.max_vfs;
2038         if (vf > num_vfs)
2039         {
2040                 PMD_DEBUG_TRACE("set pool tx:invalid pool id=%d\n", vf);
2041                 return (-EINVAL);
2042         }
2043
2044         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP);
2045         return (*dev->dev_ops->set_vf_tx)(dev, vf,on);
2046 }
2047
2048 int
2049 rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id,
2050                                  uint64_t vf_mask,uint8_t vlan_on)
2051 {
2052         struct rte_eth_dev *dev;
2053
2054         if (port_id >= nb_ports) {
2055                 PMD_DEBUG_TRACE("VF VLAN filter:invalid port id=%d\n",
2056                                 port_id);
2057                 return (-ENODEV);
2058         }
2059         dev = &rte_eth_devices[port_id];
2060
2061         if(vlan_id > ETHER_MAX_VLAN_ID)
2062         {
2063                 PMD_DEBUG_TRACE("VF VLAN filter:invalid VLAN id=%d\n",
2064                         vlan_id);
2065                 return (-EINVAL);
2066         }
2067         if (vf_mask == 0)
2068         {
2069                 PMD_DEBUG_TRACE("VF VLAN filter:pool_mask can not be 0\n");
2070                 return (-EINVAL);
2071         }
2072
2073         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP);
2074         return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id,
2075                                                 vf_mask,vlan_on);
2076 }
2077
2078 int
2079 rte_eth_mirror_rule_set(uint8_t port_id,
2080                         struct rte_eth_vmdq_mirror_conf *mirror_conf,
2081                         uint8_t rule_id, uint8_t on)
2082 {
2083         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2084
2085         if (port_id >= nb_ports) {
2086                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2087                 return (-ENODEV);
2088         }
2089
2090         if (mirror_conf->rule_type_mask == 0) {
2091                 PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
2092                 return (-EINVAL);
2093         }
2094
2095         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
2096                 PMD_DEBUG_TRACE("Invalid dst pool, pool id must"
2097                         "be 0-%d\n",ETH_64_POOLS - 1);
2098                 return (-EINVAL);
2099         }
2100
2101         if ((mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) &&
2102                 (mirror_conf->pool_mask == 0)) {
2103                 PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not"
2104                                 "be 0.\n");
2105                 return (-EINVAL);
2106         }
2107
2108         if(rule_id >= ETH_VMDQ_NUM_MIRROR_RULE)
2109         {
2110                 PMD_DEBUG_TRACE("Invalid rule_id, rule_id must be 0-%d\n",
2111                         ETH_VMDQ_NUM_MIRROR_RULE - 1);
2112                 return (-EINVAL);
2113         }
2114
2115         dev = &rte_eth_devices[port_id];
2116         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
2117
2118         return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
2119 }
2120
2121 int
2122 rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
2123 {
2124         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2125
2126         if (port_id >= nb_ports) {
2127                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2128                 return (-ENODEV);
2129         }
2130
2131         if(rule_id >= ETH_VMDQ_NUM_MIRROR_RULE)
2132         {
2133                 PMD_DEBUG_TRACE("Invalid rule_id, rule_id must be 0-%d\n",
2134                         ETH_VMDQ_NUM_MIRROR_RULE-1);
2135                 return (-EINVAL);
2136         }
2137
2138         dev = &rte_eth_devices[port_id];
2139         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
2140
2141         return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
2142 }
2143
2144 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2145 uint16_t
2146 rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
2147                  struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
2148 {
2149         struct rte_eth_dev *dev;
2150
2151         if (port_id >= nb_ports) {
2152                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2153                 return 0;
2154         }
2155         dev = &rte_eth_devices[port_id];
2156         FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, -ENOTSUP);
2157         if (queue_id >= dev->data->nb_rx_queues) {
2158                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
2159                 return 0;
2160         }
2161         return (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
2162                                                 rx_pkts, nb_pkts);
2163 }
2164
2165 uint16_t
2166 rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
2167                  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2168 {
2169         struct rte_eth_dev *dev;
2170
2171         if (port_id >= nb_ports) {
2172                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2173                 return 0;
2174         }
2175         dev = &rte_eth_devices[port_id];
2176
2177         FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, -ENOTSUP);
2178         if (queue_id >= dev->data->nb_tx_queues) {
2179                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
2180                 return 0;
2181         }
2182         return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id],
2183                                                 tx_pkts, nb_pkts);
2184 }
2185
/*
 * Return the number of used descriptors of an RX queue (debug build).
 *
 * NOTE(review): -ENOTSUP wraps to a huge positive value in this
 * uint32_t return; callers cannot tell it from a real count --
 * consider returning 0 here.
 * NOTE(review): queue_id is not validated against nb_rx_queues,
 * unlike rte_eth_rx_burst() above -- confirm drivers tolerate an
 * out-of-range queue id.
 */
uint32_t
rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
{
	struct rte_eth_dev *dev;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return 0;
	}
	dev = &rte_eth_devices[port_id];
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, -ENOTSUP);
	return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
}
2199
/*
 * Check whether the RX descriptor at 'offset' within a queue has been
 * filled by the hardware (debug build).
 *
 * NOTE(review): queue_id is not validated against nb_rx_queues before
 * indexing rx_queues[] -- an out-of-range id reads past the array;
 * confirm whether a bounds check should be added as in
 * rte_eth_rx_burst().
 */
int
rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
{
	struct rte_eth_dev *dev;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-ENODEV);
	}
	dev = &rte_eth_devices[port_id];
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
	return (*dev->dev_ops->rx_descriptor_done)( \
		dev->data->rx_queues[queue_id], offset);
}
2214 #endif
2215
/*
 * Register a user callback for an ethdev event on a port.
 *
 * The (cb_fn, cb_arg, event) triple identifies a callback: if an
 * identical registration already exists the call succeeds without
 * creating a duplicate.  The per-port callback list is protected by
 * the global rte_eth_dev_cb_lock spinlock.
 *
 * Returns 0 on success, -EINVAL for a NULL cb_fn or bad port,
 * -ENOMEM if a new callback entry could not be allocated.
 */
int
rte_eth_dev_callback_register(uint8_t port_id,
			enum rte_eth_event_type event,
			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_callback *user_cb;

	if (!cb_fn)
		return (-EINVAL);
	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-EINVAL);
	}

	dev = &rte_eth_devices[port_id];
	rte_spinlock_lock(&rte_eth_dev_cb_lock);

	/* Look for an existing identical registration. */
	TAILQ_FOREACH(user_cb, &(dev->callbacks), next) {
		if (user_cb->cb_fn == cb_fn &&
			user_cb->cb_arg == cb_arg &&
			user_cb->event == event) {
			break;
		}
	}

	/* create a new callback. */
	if (user_cb == NULL && (user_cb = rte_zmalloc("INTR_USER_CALLBACK",
			sizeof(struct rte_eth_dev_callback), 0)) != NULL) {
		user_cb->cb_fn = cb_fn;
		user_cb->cb_arg = cb_arg;
		user_cb->event = event;
		TAILQ_INSERT_TAIL(&(dev->callbacks), user_cb, next);
	}

	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
	/* user_cb is NULL only if it was not found AND allocation failed. */
	return ((user_cb == NULL) ? -ENOMEM : 0);
}
2254
/*
 * Unregister user callbacks for an ethdev event on a port.
 *
 * A cb_arg of (void *)-1 acts as a wildcard, matching every
 * registration of (cb_fn, event) regardless of its argument.
 * Callbacks currently executing (active != 0) cannot be removed; in
 * that case the matching entries are left in place and -EAGAIN is
 * returned so the caller can retry.  The list is protected by the
 * global rte_eth_dev_cb_lock spinlock.
 *
 * Returns 0 on success, -EINVAL for a NULL cb_fn or bad port,
 * -EAGAIN if any matching callback was busy.
 */
int
rte_eth_dev_callback_unregister(uint8_t port_id,
			enum rte_eth_event_type event,
			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_callback *cb, *next;

	if (!cb_fn)
		return (-EINVAL);
	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-EINVAL);
	}

	dev = &rte_eth_devices[port_id];
	rte_spinlock_lock(&rte_eth_dev_cb_lock);

	ret = 0;
	/* 'next' is saved before removal so the walk survives rte_free(). */
	for (cb = TAILQ_FIRST(&dev->callbacks); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb->cb_arg != (void *)-1 &&
				cb->cb_arg != cb_arg))
			continue;

		/*
		 * if this callback is not executing right now,
		 * then remove it.
		 */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->callbacks), cb, next);
			rte_free(cb);
		} else {
			ret = -EAGAIN;
		}
	}

	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
	return (ret);
}
2299
/*
 * Invoke every callback registered for 'event' on a device (internal,
 * called by PMDs from interrupt context).
 *
 * Each matching entry is copied to a local (dev_cb) and marked active
 * so the spinlock can be dropped while the user callback runs --
 * user code must be able to call back into the ethdev API.  The
 * 'active' flag also lets callback_unregister() refuse to free an
 * entry that is mid-execution.
 */
void
_rte_eth_dev_callback_process(struct rte_eth_dev *dev,
	enum rte_eth_event_type event)
{
	struct rte_eth_dev_callback *cb_lst;
	struct rte_eth_dev_callback dev_cb;

	rte_spinlock_lock(&rte_eth_dev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->callbacks), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		/* Copy the entry, then release the lock around the call. */
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		rte_spinlock_unlock(&rte_eth_dev_cb_lock);
		dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
						dev_cb.cb_arg);
		rte_spinlock_lock(&rte_eth_dev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
}
2321 #ifdef RTE_NIC_BYPASS
2322 int rte_eth_dev_bypass_init(uint8_t port_id)
2323 {
2324         struct rte_eth_dev *dev;
2325
2326         if (port_id >= nb_ports) {
2327                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2328                 return (-ENODEV);
2329         }
2330
2331         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2332                 PMD_DEBUG_TRACE("Invalid port device\n");
2333                 return (-ENODEV);
2334         }
2335
2336         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP);
2337         (*dev->dev_ops->bypass_init)(dev);
2338         return 0;
2339 }
2340
2341 int
2342 rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state)
2343 {
2344         struct rte_eth_dev *dev;
2345
2346         if (port_id >= nb_ports) {
2347                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2348                 return (-ENODEV);
2349         }
2350
2351         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2352                 PMD_DEBUG_TRACE("Invalid port device\n");
2353                 return (-ENODEV);
2354         }
2355         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
2356         (*dev->dev_ops->bypass_state_show)(dev, state);
2357         return 0;
2358 }
2359
2360 int
2361 rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state)
2362 {
2363         struct rte_eth_dev *dev;
2364
2365         if (port_id >= nb_ports) {
2366                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2367                 return (-ENODEV);
2368         }
2369
2370         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2371                 PMD_DEBUG_TRACE("Invalid port device\n");
2372                 return (-ENODEV);
2373         }
2374
2375         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP);
2376         (*dev->dev_ops->bypass_state_set)(dev, new_state);
2377         return 0;
2378 }
2379
2380 int
2381 rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state)
2382 {
2383         struct rte_eth_dev *dev;
2384
2385         if (port_id >= nb_ports) {
2386                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2387                 return (-ENODEV);
2388         }
2389
2390         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2391                 PMD_DEBUG_TRACE("Invalid port device\n");
2392                 return (-ENODEV);
2393         }
2394
2395         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
2396         (*dev->dev_ops->bypass_event_show)(dev, event, state);
2397         return 0;
2398 }
2399
2400 int
2401 rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state)
2402 {
2403         struct rte_eth_dev *dev;
2404
2405         if (port_id >= nb_ports) {
2406                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2407                 return (-ENODEV);
2408         }
2409
2410         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2411                 PMD_DEBUG_TRACE("Invalid port device\n");
2412                 return (-ENODEV);
2413         }
2414
2415         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP);
2416         (*dev->dev_ops->bypass_event_set)(dev, event, state);
2417         return 0;
2418 }
2419
2420 int
2421 rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout)
2422 {
2423         struct rte_eth_dev *dev;
2424
2425         if (port_id >= nb_ports) {
2426                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2427                 return (-ENODEV);
2428         }
2429
2430         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2431                 PMD_DEBUG_TRACE("Invalid port device\n");
2432                 return (-ENODEV);
2433         }
2434
2435         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP);
2436         (*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout);
2437         return 0;
2438 }
2439
2440 int
2441 rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver)
2442 {
2443         struct rte_eth_dev *dev;
2444
2445         if (port_id >= nb_ports) {
2446                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2447                 return (-ENODEV);
2448         }
2449
2450         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2451                 PMD_DEBUG_TRACE("Invalid port device\n");
2452                 return (-ENODEV);
2453         }
2454
2455         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP);
2456         (*dev->dev_ops->bypass_ver_show)(dev, ver);
2457         return 0;
2458 }
2459
2460 int
2461 rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
2462 {
2463         struct rte_eth_dev *dev;
2464
2465         if (port_id >= nb_ports) {
2466                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2467                 return (-ENODEV);
2468         }
2469
2470         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2471                 PMD_DEBUG_TRACE("Invalid port device\n");
2472                 return (-ENODEV);
2473         }
2474
2475         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP);
2476         (*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout);
2477         return 0;
2478 }
2479
2480 int
2481 rte_eth_dev_bypass_wd_reset(uint8_t port_id)
2482 {
2483         struct rte_eth_dev *dev;
2484
2485         if (port_id >= nb_ports) {
2486                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2487                 return (-ENODEV);
2488         }
2489
2490         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2491                 PMD_DEBUG_TRACE("Invalid port device\n");
2492                 return (-ENODEV);
2493         }
2494
2495         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP);
2496         (*dev->dev_ops->bypass_wd_reset)(dev);
2497         return 0;
2498 }
2499 #endif