ethdev: prevent from starting/stopping already started/stopped device
[dpdk.git] / lib / librte_ether / rte_ethdev.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/types.h>
35 #include <sys/queue.h>
36 #include <ctype.h>
37 #include <stdio.h>
38 #include <stdlib.h>
39 #include <string.h>
40 #include <stdarg.h>
41 #include <errno.h>
42 #include <stdint.h>
43 #include <inttypes.h>
44
45 #include <rte_byteorder.h>
46 #include <rte_log.h>
47 #include <rte_debug.h>
48 #include <rte_interrupts.h>
49 #include <rte_pci.h>
50 #include <rte_memory.h>
51 #include <rte_memcpy.h>
52 #include <rte_memzone.h>
53 #include <rte_launch.h>
54 #include <rte_tailq.h>
55 #include <rte_eal.h>
56 #include <rte_per_lcore.h>
57 #include <rte_lcore.h>
58 #include <rte_atomic.h>
59 #include <rte_branch_prediction.h>
60 #include <rte_common.h>
61 #include <rte_ring.h>
62 #include <rte_mempool.h>
63 #include <rte_malloc.h>
64 #include <rte_mbuf.h>
65 #include <rte_errno.h>
66 #include <rte_spinlock.h>
67
68 #include "rte_ether.h"
69 #include "rte_ethdev.h"
70
71 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
72 #define PMD_DEBUG_TRACE(fmt, args...) do {                        \
73                 RTE_LOG(ERR, PMD, "%s: " fmt, __func__, ## args); \
74         } while (0)
75 #else
76 #define PMD_DEBUG_TRACE(fmt, args...)
77 #endif
78
79 /* Macros restricting functions to the primary process only */
80 #define PROC_PRIMARY_OR_ERR_RET(retval) do { \
81         if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
82                 PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
83                 return (retval); \
84         } \
85 } while(0)
86 #define PROC_PRIMARY_OR_RET() do { \
87         if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
88                 PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
89                 return; \
90         } \
91 } while(0)
92
93 /* Macros to check for invalid function pointers in dev_ops structure */
94 #define FUNC_PTR_OR_ERR_RET(func, retval) do { \
95         if ((func) == NULL) { \
96                 PMD_DEBUG_TRACE("Function not supported\n"); \
97                 return (retval); \
98         } \
99 } while(0)
100 #define FUNC_PTR_OR_RET(func) do { \
101         if ((func) == NULL) { \
102                 PMD_DEBUG_TRACE("Function not supported\n"); \
103                 return; \
104         } \
105 } while(0)
106
107 static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
108 struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
109 static struct rte_eth_dev_data *rte_eth_dev_data = NULL;
110 static uint8_t nb_ports = 0;
111
112 /* spinlock for eth device callbacks */
113 static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
114
115 /**
116  * The user application callback description.
117  *
118  * It contains callback address to be registered by user application,
119  * the pointer to the parameters for callback, and the event type.
120  */
121 struct rte_eth_dev_callback {
122         TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
123         rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
124         void *cb_arg;                           /**< Parameter for callback */
125         enum rte_eth_event_type event;          /**< Interrupt event type */
126         uint32_t active;                        /**< Callback is executing */
127 };
128
129 enum {
130         STAT_QMAP_TX = 0,
131         STAT_QMAP_RX
132 };
133
134 static inline void
135 rte_eth_dev_data_alloc(void)
136 {
137         const unsigned flags = 0;
138         const struct rte_memzone *mz;
139
140         if (rte_eal_process_type() == RTE_PROC_PRIMARY){
141                 mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
142                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
143                                 rte_socket_id(), flags);
144         } else
145                 mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
146         if (mz == NULL)
147                 rte_panic("Cannot allocate memzone for ethernet port data\n");
148
149         rte_eth_dev_data = mz->addr;
150         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
151                 memset(rte_eth_dev_data, 0,
152                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
153 }
154
155 struct rte_eth_dev *
156 rte_eth_dev_allocate(void)
157 {
158         struct rte_eth_dev *eth_dev;
159
160         if (nb_ports == RTE_MAX_ETHPORTS) {
161                 PMD_DEBUG_TRACE("Reached maximum number of ethernet ports\n");
162                 return NULL;
163         }
164
165         if (rte_eth_dev_data == NULL)
166                 rte_eth_dev_data_alloc();
167
168         eth_dev = &rte_eth_devices[nb_ports];
169         eth_dev->data = &rte_eth_dev_data[nb_ports];
170         eth_dev->data->port_id = nb_ports++;
171         return eth_dev;
172 }
173
/*
 * PCI probe callback shared by all ethernet PMDs (installed by
 * rte_eth_driver_register).  Allocates a device slot, the per-port
 * private data (primary process only) and invokes the driver's
 * eth_dev_init hook.  On hook failure the private data is freed and the
 * port slot is reclaimed.  Returns 0, -ENOMEM, or the hook's error.
 */
static int
rte_eth_dev_init(struct rte_pci_driver *pci_drv,
                 struct rte_pci_device *pci_dev)
{
        struct eth_driver    *eth_drv;
        struct rte_eth_dev *eth_dev;
        int diag;

        /* struct eth_driver embeds rte_pci_driver as its first member */
        eth_drv = (struct eth_driver *)pci_drv;

        eth_dev = rte_eth_dev_allocate();
        if (eth_dev == NULL)
                return -ENOMEM;

        /* only the primary process owns the private port data */
        if (rte_eal_process_type() == RTE_PROC_PRIMARY){
                eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
                                  eth_drv->dev_private_size,
                                  CACHE_LINE_SIZE);
                if (eth_dev->data->dev_private == NULL)
                        rte_panic("Cannot allocate memzone for private port data\n");
        }
        eth_dev->pci_dev = pci_dev;
        eth_dev->driver = eth_drv;
        eth_dev->data->rx_mbuf_alloc_failed = 0;

        /* init user callbacks */
        TAILQ_INIT(&(eth_dev->callbacks));

        /*
         * Set the default maximum frame size.
         */
        eth_dev->data->max_frame_size = ETHER_MAX_LEN;

        /* Invoke PMD device initialization function */
        diag = (*eth_drv->eth_dev_init)(eth_drv, eth_dev);
        if (diag == 0)
                return (0);

        /* init failed: undo the allocations made above */
        PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%u device_id=0x%x)"
                        " failed\n", pci_drv->name,
                        (unsigned) pci_dev->id.vendor_id,
                        (unsigned) pci_dev->id.device_id);
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                rte_free(eth_dev->data->dev_private);
        /* reclaim the port slot taken by rte_eth_dev_allocate() */
        nb_ports--;
        return diag;
}
221
222 /**
223  * Register an Ethernet [Poll Mode] driver.
224  *
225  * Function invoked by the initialization function of an Ethernet driver
226  * to simultaneously register itself as a PCI driver and as an Ethernet
227  * Poll Mode Driver.
228  * Invokes the rte_eal_pci_register() function to register the *pci_drv*
229  * structure embedded in the *eth_drv* structure, after having stored the
230  * address of the rte_eth_dev_init() function in the *devinit* field of
231  * the *pci_drv* structure.
232  * During the PCI probing phase, the rte_eth_dev_init() function is
233  * invoked for each PCI [Ethernet device] matching the embedded PCI
234  * identifiers provided by the driver.
235  */
236 void
237 rte_eth_driver_register(struct eth_driver *eth_drv)
238 {
239         eth_drv->pci_drv.devinit = rte_eth_dev_init;
240         rte_eal_pci_register(&eth_drv->pci_drv);
241 }
242
243 int
244 rte_eth_dev_socket_id(uint8_t port_id)
245 {
246         if (port_id >= nb_ports)
247                 return -1;
248         return rte_eth_devices[port_id].pci_dev->numa_node;
249 }
250
251 uint8_t
252 rte_eth_dev_count(void)
253 {
254         return (nb_ports);
255 }
256
/*
 * (Re)size the array of RX queue pointers in dev->data to *nb_queues*
 * entries.  First call zero-allocates the array; subsequent calls
 * release queues past the new count via the PMD's rx_queue_release hook
 * and realloc the array.  Returns 0, -ENOMEM, or -ENOTSUP (re-configure
 * without a release hook).
 */
static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_rx_queues;
        void **rxq;
        unsigned i;

        if (dev->data->rx_queues == NULL) { /* first time configuration */
                dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
                                sizeof(dev->data->rx_queues[0]) * nb_queues,
                                CACHE_LINE_SIZE);
                if (dev->data->rx_queues == NULL) {
                        dev->data->nb_rx_queues = 0;
                        return -(ENOMEM);
                }
        } else { /* re-configure */
                FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                /* release queues that fall beyond the new count */
                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);
                rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
                                CACHE_LINE_SIZE);
                if (rxq == NULL)
                        /* NOTE(review): on this path queues >= nb_queues were
                         * already released but nb_rx_queues still holds the
                         * old count — caller must re-configure; confirm
                         * callers treat this error as such. */
                        return -(ENOMEM);

                /* zero only the newly appended tail entries */
                if (nb_queues > old_nb_queues)
                        memset(rxq + old_nb_queues, 0,
                                sizeof(rxq[0]) * (nb_queues - old_nb_queues));

                dev->data->rx_queues = rxq;

        }
        dev->data->nb_rx_queues = nb_queues;
        return (0);
}
294
295 int
296 rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
297 {
298         struct rte_eth_dev *dev;
299
300         /* This function is only safe when called from the primary process
301          * in a multi-process setup*/
302         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
303
304         if (port_id >= nb_ports) {
305                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
306                 return -EINVAL;
307         }
308
309         dev = &rte_eth_devices[port_id];
310         if (rx_queue_id >= dev->data->nb_rx_queues) {
311                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
312                 return -EINVAL;
313         }
314
315         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
316
317         return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
318
319 }
320
321 int
322 rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
323 {
324         struct rte_eth_dev *dev;
325
326         /* This function is only safe when called from the primary process
327          * in a multi-process setup*/
328         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
329
330         if (port_id >= nb_ports) {
331                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
332                 return -EINVAL;
333         }
334
335         dev = &rte_eth_devices[port_id];
336         if (rx_queue_id >= dev->data->nb_rx_queues) {
337                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
338                 return -EINVAL;
339         }
340
341         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
342
343         return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
344
345 }
346
347 int
348 rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
349 {
350         struct rte_eth_dev *dev;
351
352         /* This function is only safe when called from the primary process
353          * in a multi-process setup*/
354         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
355
356         if (port_id >= nb_ports) {
357                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
358                 return -EINVAL;
359         }
360
361         dev = &rte_eth_devices[port_id];
362         if (tx_queue_id >= dev->data->nb_tx_queues) {
363                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
364                 return -EINVAL;
365         }
366
367         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
368
369         return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
370
371 }
372
373 int
374 rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
375 {
376         struct rte_eth_dev *dev;
377
378         /* This function is only safe when called from the primary process
379          * in a multi-process setup*/
380         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
381
382         if (port_id >= nb_ports) {
383                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
384                 return -EINVAL;
385         }
386
387         dev = &rte_eth_devices[port_id];
388         if (tx_queue_id >= dev->data->nb_tx_queues) {
389                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
390                 return -EINVAL;
391         }
392
393         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
394
395         return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
396
397 }
398
/*
 * (Re)size the array of TX queue pointers in dev->data to *nb_queues*
 * entries — TX counterpart of rte_eth_dev_rx_queue_config().  First call
 * zero-allocates the array; subsequent calls release queues past the new
 * count via the PMD's tx_queue_release hook and realloc the array.
 * Returns 0, -ENOMEM, or -ENOTSUP (re-configure without a release hook).
 */
static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_tx_queues;
        void **txq;
        unsigned i;

        if (dev->data->tx_queues == NULL) { /* first time configuration */
                dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
                                sizeof(dev->data->tx_queues[0]) * nb_queues,
                                CACHE_LINE_SIZE);
                if (dev->data->tx_queues == NULL) {
                        dev->data->nb_tx_queues = 0;
                        return -(ENOMEM);
                }
        } else { /* re-configure */
                FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                /* release queues that fall beyond the new count */
                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);
                txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
                                CACHE_LINE_SIZE);
                if (txq == NULL)
                        /* NOTE(review): on this path queues >= nb_queues were
                         * already released but nb_tx_queues still holds the
                         * old count — caller must re-configure; confirm
                         * callers treat this error as such. */
                        return -(ENOMEM);

                /* zero only the newly appended tail entries */
                if (nb_queues > old_nb_queues)
                        memset(txq + old_nb_queues, 0,
                                sizeof(txq[0]) * (nb_queues - old_nb_queues));

                dev->data->tx_queues = txq;

        }
        dev->data->nb_tx_queues = nb_queues;
        return (0);
}
436
/*
 * Validate the multi-queue (RSS/DCB/VMDq) part of *dev_conf* against the
 * device's SRIOV state and the requested queue counts.
 *
 * When SRIOV is active, RSS/DCB modes are rejected and the rx/tx mq_mode
 * stored in dev->data->dev_conf is forced to the VMDq-only default (with
 * nb_q_per_pool clamped to 1).  Otherwise the VMDQ+DCB and DCB queue/pool
 * counts are checked.  Returns 0 if acceptable, -EINVAL otherwise.
 */
static int
rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                      const struct rte_eth_conf *dev_conf)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];

        if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
                /* check multi-queue mode */
                if ((dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ||
                    (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) ||
                    (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB_RSS) ||
                    (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB)) {
                        /* SRIOV only works in VMDq enable mode */
                        PMD_DEBUG_TRACE("ethdev port_id=%hhu SRIOV active, "
                                        "wrong VMDQ mq_mode rx %u tx %u\n",
                                        port_id,
                                        dev_conf->rxmode.mq_mode,
                                        dev_conf->txmode.mq_mode);
                        return (-EINVAL);
                }

                switch (dev_conf->rxmode.mq_mode) {
                case ETH_MQ_RX_VMDQ_RSS:
                case ETH_MQ_RX_VMDQ_DCB:
                case ETH_MQ_RX_VMDQ_DCB_RSS:
                        /* DCB/RSS VMDQ in SRIOV mode, not implement yet */
                        PMD_DEBUG_TRACE("ethdev port_id=%hhu SRIOV active, "
                                        "unsupported VMDQ mq_mode rx %u\n",
                                        port_id, dev_conf->rxmode.mq_mode);
                        return (-EINVAL);
                default: /* ETH_MQ_RX_VMDQ_ONLY or ETH_MQ_RX_NONE */
                        /* if nothing mq mode configure, use default scheme */
                        dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
                        if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
                                RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
                        break;
                }

                switch (dev_conf->txmode.mq_mode) {
                case ETH_MQ_TX_VMDQ_DCB:
                        /* DCB VMDQ in SRIOV mode, not implement yet */
                        PMD_DEBUG_TRACE("ethdev port_id=%hhu SRIOV active, "
                                        "unsupported VMDQ mq_mode tx %u\n",
                                        port_id, dev_conf->txmode.mq_mode);
                        return (-EINVAL);
                default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
                        /* if nothing mq mode configure, use default scheme */
                        dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
                        if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
                                RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
                        break;
                }

                /* check valid queue number */
                if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
                    (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
                        PMD_DEBUG_TRACE("ethdev port_id=%d SRIOV active, "
                                    "queue number must less equal to %d\n",
                                        port_id, RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
                        return (-EINVAL);
                }
        } else {
                /* For vmdb+dcb mode check our configuration before we go further */
                if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
                        const struct rte_eth_vmdq_dcb_conf *conf;

                        /* VMDQ+DCB RX requires exactly the fixed queue count */
                        if (nb_rx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_rx_q "
                                                "!= %d\n",
                                                port_id, ETH_VMDQ_DCB_NUM_QUEUES);
                                return (-EINVAL);
                        }
                        conf = &(dev_conf->rx_adv_conf.vmdq_dcb_conf);
                        if (! (conf->nb_queue_pools == ETH_16_POOLS ||
                               conf->nb_queue_pools == ETH_32_POOLS)) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
                                                "nb_queue_pools must be %d or %d\n",
                                                port_id, ETH_16_POOLS, ETH_32_POOLS);
                                return (-EINVAL);
                        }
                }
                if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
                        const struct rte_eth_vmdq_dcb_tx_conf *conf;

                        /* VMDQ+DCB TX requires exactly the fixed queue count */
                        if (nb_tx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_tx_q "
                                                "!= %d\n",
                                                port_id, ETH_VMDQ_DCB_NUM_QUEUES);
                                return (-EINVAL);
                        }
                        conf = &(dev_conf->tx_adv_conf.vmdq_dcb_tx_conf);
                        if (! (conf->nb_queue_pools == ETH_16_POOLS ||
                               conf->nb_queue_pools == ETH_32_POOLS)) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
                                                "nb_queue_pools != %d or nb_queue_pools "
                                                "!= %d\n",
                                                port_id, ETH_16_POOLS, ETH_32_POOLS);
                                return (-EINVAL);
                        }
                }

                /* For DCB mode check our configuration before we go further */
                if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
                        const struct rte_eth_dcb_rx_conf *conf;

                        if (nb_rx_q != ETH_DCB_NUM_QUEUES) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_rx_q "
                                                "!= %d\n",
                                                port_id, ETH_DCB_NUM_QUEUES);
                                return (-EINVAL);
                        }
                        conf = &(dev_conf->rx_adv_conf.dcb_rx_conf);
                        /* only 4 or 8 traffic classes are valid for DCB */
                        if (! (conf->nb_tcs == ETH_4_TCS ||
                               conf->nb_tcs == ETH_8_TCS)) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
                                                "nb_tcs != %d or nb_tcs "
                                                "!= %d\n",
                                                port_id, ETH_4_TCS, ETH_8_TCS);
                                return (-EINVAL);
                        }
                }

                if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
                        const struct rte_eth_dcb_tx_conf *conf;

                        if (nb_tx_q != ETH_DCB_NUM_QUEUES) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_tx_q "
                                                "!= %d\n",
                                                port_id, ETH_DCB_NUM_QUEUES);
                                return (-EINVAL);
                        }
                        conf = &(dev_conf->tx_adv_conf.dcb_tx_conf);
                        /* only 4 or 8 traffic classes are valid for DCB */
                        if (! (conf->nb_tcs == ETH_4_TCS ||
                               conf->nb_tcs == ETH_8_TCS)) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
                                                "nb_tcs != %d or nb_tcs "
                                                "!= %d\n",
                                                port_id, ETH_4_TCS, ETH_8_TCS);
                                return (-EINVAL);
                        }
                }
        }
        return 0;
}
581
/*
 * Configure an ethernet device: validate the requested RX/TX queue
 * counts and *dev_conf* against the device capabilities, store the
 * configuration, (re)allocate the queue pointer arrays and invoke the
 * PMD's dev_configure hook.
 *
 * Primary process only; the device must be stopped (-EBUSY otherwise).
 * Returns 0 on success or a negative errno-style value; on PMD
 * configure failure the queue arrays are released again.
 */
int
rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                      const struct rte_eth_conf *dev_conf)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        int diag;

        /* This function is only safe when called from the primary process
         * in a multi-process setup*/
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        if (port_id >= nb_ports || port_id >= RTE_MAX_ETHPORTS) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-EINVAL);
        }
        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

        /* reconfiguring a running device is not allowed */
        if (dev->data->dev_started) {
                PMD_DEBUG_TRACE(
                    "port %d must be stopped to allow configuration\n", port_id);
                return (-EBUSY);
        }

        /*
         * Check that the numbers of RX and TX queues are not greater
         * than the maximum number of RX and TX queues supported by the
         * configured device.
         */
        (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
        if (nb_rx_q > dev_info.max_rx_queues) {
                PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
                                port_id, nb_rx_q, dev_info.max_rx_queues);
                return (-EINVAL);
        }
        if (nb_rx_q == 0) {
                PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_q == 0\n", port_id);
                return (-EINVAL);
        }

        if (nb_tx_q > dev_info.max_tx_queues) {
                PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
                                port_id, nb_tx_q, dev_info.max_tx_queues);
                return (-EINVAL);
        }
        if (nb_tx_q == 0) {
                PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_q == 0\n", port_id);
                return (-EINVAL);
        }

        /* Copy the dev_conf parameter into the dev structure */
        /* (done before the mq-mode check, which may patch the stored copy) */
        memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

        /*
         * If jumbo frames are enabled, check that the maximum RX packet
         * length is supported by the configured device.
         */
        if (dev_conf->rxmode.jumbo_frame == 1) {
                if (dev_conf->rxmode.max_rx_pkt_len >
                    dev_info.max_rx_pktlen) {
                        PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
                                " > max valid value %u\n",
                                port_id,
                                (unsigned)dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned)dev_info.max_rx_pktlen);
                        return (-EINVAL);
                }
                else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
                        PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
                                " < min valid value %u\n",
                                port_id,
                                (unsigned)dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned)ETHER_MIN_LEN);
                        return (-EINVAL);
                }
        } else
                /* Use default value */
                dev->data->dev_conf.rxmode.max_rx_pkt_len = ETHER_MAX_LEN;

        /* multipe queue mode checking */
        diag = rte_eth_dev_check_mq_mode(port_id, nb_rx_q, nb_tx_q, dev_conf);
        if (diag != 0) {
                PMD_DEBUG_TRACE("port%d rte_eth_dev_check_mq_mode = %d\n",
                                port_id, diag);
                return diag;
        }

        /*
         * Setup new number of RX/TX queues and reconfigure device.
         */
        diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
        if (diag != 0) {
                PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
                                port_id, diag);
                return diag;
        }

        diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
        if (diag != 0) {
                PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
                                port_id, diag);
                /* roll back the RX queue array allocated above */
                rte_eth_dev_rx_queue_config(dev, 0);
                return diag;
        }

        diag = (*dev->dev_ops->dev_configure)(dev);
        if (diag != 0) {
                PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
                                port_id, diag);
                /* roll back both queue arrays allocated above */
                rte_eth_dev_rx_queue_config(dev, 0);
                rte_eth_dev_tx_queue_config(dev, 0);
                return diag;
        }

        return 0;
}
701
/*
 * Re-apply the configuration kept in dev->data to the hardware after a
 * (re)start: non-zero MAC addresses, promiscuous state and allmulticast
 * state.  Best-effort: a PMD without mac_addr_add only logs and skips
 * the MAC replay.
 */
static void
rte_eth_dev_config_restore(uint8_t port_id)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        struct ether_addr addr;
        uint16_t i;
        uint32_t pool = 0;

        dev = &rte_eth_devices[port_id];

        rte_eth_dev_info_get(port_id, &dev_info);

        /* with SRIOV active, addresses go to the default VMDq pool */
        if (RTE_ETH_DEV_SRIOV(dev).active)
                pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx;

        /* replay MAC address configuration */
        for (i = 0; i < dev_info.max_mac_addrs; i++) {
                addr = dev->data->mac_addrs[i];

                /* skip zero address */
                if (is_zero_ether_addr(&addr))
                        continue;

                /* add address to the hardware */
                if  (*dev->dev_ops->mac_addr_add)
                        (*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool);
                else {
                        PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
                                        port_id);
                        /* exit the loop but not return an error */
                        break;
                }
        }

        /* replay promiscuous configuration */
        if (rte_eth_promiscuous_get(port_id) == 1)
                rte_eth_promiscuous_enable(port_id);
        else if (rte_eth_promiscuous_get(port_id) == 0)
                rte_eth_promiscuous_disable(port_id);

        /* replay allmulticast configuration */
        if (rte_eth_allmulticast_get(port_id) == 1)
                rte_eth_allmulticast_enable(port_id);
        else if (rte_eth_allmulticast_get(port_id) == 0)
                rte_eth_allmulticast_disable(port_id);
}
749
750 int
751 rte_eth_dev_start(uint8_t port_id)
752 {
753         struct rte_eth_dev *dev;
754         int diag;
755
756         /* This function is only safe when called from the primary process
757          * in a multi-process setup*/
758         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
759
760         if (port_id >= nb_ports) {
761                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
762                 return (-EINVAL);
763         }
764         dev = &rte_eth_devices[port_id];
765
766         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
767
768         if (dev->data->dev_started != 0) {
769                 PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
770                         " already started\n",
771                         port_id);
772                 return (0);
773         }
774
775         diag = (*dev->dev_ops->dev_start)(dev);
776         if (diag == 0)
777                 dev->data->dev_started = 1;
778         else
779                 return diag;
780
781         rte_eth_dev_config_restore(port_id);
782
783         return 0;
784 }
785
786 void
787 rte_eth_dev_stop(uint8_t port_id)
788 {
789         struct rte_eth_dev *dev;
790
791         /* This function is only safe when called from the primary process
792          * in a multi-process setup*/
793         PROC_PRIMARY_OR_RET();
794
795         if (port_id >= nb_ports) {
796                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
797                 return;
798         }
799         dev = &rte_eth_devices[port_id];
800
801         FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
802
803         if (dev->data->dev_started == 0) {
804                 PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
805                         " already stopped\n",
806                         port_id);
807                 return;
808         }
809
810         dev->data->dev_started = 0;
811         (*dev->dev_ops->dev_stop)(dev);
812 }
813
814 void
815 rte_eth_dev_close(uint8_t port_id)
816 {
817         struct rte_eth_dev *dev;
818
819         /* This function is only safe when called from the primary process
820          * in a multi-process setup*/
821         PROC_PRIMARY_OR_RET();
822
823         if (port_id >= nb_ports) {
824                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
825                 return;
826         }
827
828         dev = &rte_eth_devices[port_id];
829
830         FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
831         dev->data->dev_started = 0;
832         (*dev->dev_ops->dev_close)(dev);
833 }
834
/**
 * Configure one RX queue of an Ethernet port.
 *
 * Validates the port/queue ids, refuses to reconfigure a started port,
 * checks that the given mempool's mbuf data room can hold at least
 * RTE_PKTMBUF_HEADROOM plus the device's minimum RX buffer size, and
 * then delegates to the PMD's rx_queue_setup hook.
 *
 * Returns 0 or a PMD return code on success; -E_RTE_SECONDARY from a
 * secondary process, -EINVAL on bad ids or too-small buffers, -EBUSY if
 * the port is started, -ENOTSUP if the PMD lacks a required hook,
 * -ENOSPC if the mempool has no valid private data area.
 */
int
rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
		       uint16_t nb_rx_desc, unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf,
		       struct rte_mempool *mp)
{
	struct rte_eth_dev *dev;
	struct rte_pktmbuf_pool_private *mbp_priv;
	struct rte_eth_dev_info dev_info;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup*/
	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-EINVAL);
	}
	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
		return (-EINVAL);
	}

	/* Queues may only be (re)configured while the port is stopped. */
	if (dev->data->dev_started) {
		PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n", port_id);
		return -EBUSY;
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);

	/*
	 * Check the size of the mbuf data buffer.
	 * This value must be provided in the private data of the memory pool.
	 * First check that the memory pool has a valid private data.
	 */
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);
	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
		PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
				mp->name, (int) mp->private_data_size,
				(int) sizeof(struct rte_pktmbuf_pool_private));
		return (-ENOSPC);
	}
	/* Ensure data room (minus headroom) covers the device's minimum
	 * RX buffer size. */
	mbp_priv = rte_mempool_get_priv(mp);
	if ((uint32_t) (mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM) <
	    dev_info.min_rx_bufsize) {
		PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
				"(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
				"=%d)\n",
				mp->name,
				(int)mbp_priv->mbuf_data_room_size,
				(int)(RTE_PKTMBUF_HEADROOM +
				      dev_info.min_rx_bufsize),
				(int)RTE_PKTMBUF_HEADROOM,
				(int)dev_info.min_rx_bufsize);
		return (-EINVAL);
	}

	return (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
					       socket_id, rx_conf, mp);
}
898
899 int
900 rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
901                        uint16_t nb_tx_desc, unsigned int socket_id,
902                        const struct rte_eth_txconf *tx_conf)
903 {
904         struct rte_eth_dev *dev;
905
906         /* This function is only safe when called from the primary process
907          * in a multi-process setup*/
908         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
909
910         if (port_id >= RTE_MAX_ETHPORTS || port_id >= nb_ports) {
911                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
912                 return (-EINVAL);
913         }
914         dev = &rte_eth_devices[port_id];
915         if (tx_queue_id >= dev->data->nb_tx_queues) {
916                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
917                 return (-EINVAL);
918         }
919
920         if (dev->data->dev_started) {
921                 PMD_DEBUG_TRACE(
922                     "port %d must be stopped to allow configuration\n", port_id);
923                 return -EBUSY;
924         }
925
926         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
927         return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
928                                                socket_id, tx_conf);
929 }
930
931 void
932 rte_eth_promiscuous_enable(uint8_t port_id)
933 {
934         struct rte_eth_dev *dev;
935
936         if (port_id >= nb_ports) {
937                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
938                 return;
939         }
940         dev = &rte_eth_devices[port_id];
941
942         FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
943         (*dev->dev_ops->promiscuous_enable)(dev);
944         dev->data->promiscuous = 1;
945 }
946
947 void
948 rte_eth_promiscuous_disable(uint8_t port_id)
949 {
950         struct rte_eth_dev *dev;
951
952         if (port_id >= nb_ports) {
953                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
954                 return;
955         }
956         dev = &rte_eth_devices[port_id];
957
958         FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
959         dev->data->promiscuous = 0;
960         (*dev->dev_ops->promiscuous_disable)(dev);
961 }
962
963 int
964 rte_eth_promiscuous_get(uint8_t port_id)
965 {
966         struct rte_eth_dev *dev;
967
968         if (port_id >= nb_ports) {
969                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
970                 return -1;
971         }
972
973         dev = &rte_eth_devices[port_id];
974         return dev->data->promiscuous;
975 }
976
977 void
978 rte_eth_allmulticast_enable(uint8_t port_id)
979 {
980         struct rte_eth_dev *dev;
981
982         if (port_id >= nb_ports) {
983                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
984                 return;
985         }
986         dev = &rte_eth_devices[port_id];
987
988         FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
989         (*dev->dev_ops->allmulticast_enable)(dev);
990         dev->data->all_multicast = 1;
991 }
992
993 void
994 rte_eth_allmulticast_disable(uint8_t port_id)
995 {
996         struct rte_eth_dev *dev;
997
998         if (port_id >= nb_ports) {
999                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1000                 return;
1001         }
1002         dev = &rte_eth_devices[port_id];
1003
1004         FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
1005         dev->data->all_multicast = 0;
1006         (*dev->dev_ops->allmulticast_disable)(dev);
1007 }
1008
1009 int
1010 rte_eth_allmulticast_get(uint8_t port_id)
1011 {
1012         struct rte_eth_dev *dev;
1013
1014         if (port_id >= nb_ports) {
1015                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1016                 return -1;
1017         }
1018
1019         dev = &rte_eth_devices[port_id];
1020         return dev->data->all_multicast;
1021 }
1022
/**
 * Atomically copy the device's link status (dev->data->dev_link) into
 * *link, using a 64-bit compare-and-set so a concurrent writer cannot
 * leave *link half-updated.
 *
 * NOTE(review): this type-puns struct rte_eth_link through uint64_t*,
 * so it assumes the struct is exactly 64 bits and suitably aligned —
 * presumably guaranteed where the struct is declared; verify there.
 *
 * @return 0 on success, -1 if the cmpset failed (dst changed between
 *         the read of *dst and the swap).
 */
static inline int
rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
1036
1037 void
1038 rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
1039 {
1040         struct rte_eth_dev *dev;
1041
1042         if (port_id >= nb_ports) {
1043                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1044                 return;
1045         }
1046         dev = &rte_eth_devices[port_id];
1047         FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1048
1049         if (dev->data->dev_conf.intr_conf.lsc != 0)
1050                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1051         else {
1052                 (*dev->dev_ops->link_update)(dev, 1);
1053                 *eth_link = dev->data->dev_link;
1054         }
1055 }
1056
1057 void
1058 rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
1059 {
1060         struct rte_eth_dev *dev;
1061
1062         if (port_id >= nb_ports) {
1063                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1064                 return;
1065         }
1066         dev = &rte_eth_devices[port_id];
1067         FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1068
1069         if (dev->data->dev_conf.intr_conf.lsc != 0)
1070                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1071         else {
1072                 (*dev->dev_ops->link_update)(dev, 0);
1073                 *eth_link = dev->data->dev_link;
1074         }
1075 }
1076
1077 void
1078 rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
1079 {
1080         struct rte_eth_dev *dev;
1081
1082         if (port_id >= nb_ports) {
1083                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1084                 return;
1085         }
1086         dev = &rte_eth_devices[port_id];
1087         memset(stats, 0, sizeof(*stats));
1088
1089         FUNC_PTR_OR_RET(*dev->dev_ops->stats_get);
1090         (*dev->dev_ops->stats_get)(dev, stats);
1091         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1092 }
1093
1094 void
1095 rte_eth_stats_reset(uint8_t port_id)
1096 {
1097         struct rte_eth_dev *dev;
1098
1099         if (port_id >= nb_ports) {
1100                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1101                 return;
1102         }
1103         dev = &rte_eth_devices[port_id];
1104
1105         FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
1106         (*dev->dev_ops->stats_reset)(dev);
1107 }
1108
1109
1110 static int
1111 set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
1112                 uint8_t is_rx)
1113 {
1114         struct rte_eth_dev *dev;
1115
1116         if (port_id >= nb_ports) {
1117                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1118                 return -ENODEV;
1119         }
1120         dev = &rte_eth_devices[port_id];
1121
1122         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
1123         return (*dev->dev_ops->queue_stats_mapping_set)
1124                         (dev, queue_id, stat_idx, is_rx);
1125 }
1126
1127
/**
 * Map a TX queue of a port onto a statistics counter index.
 * Thin wrapper around set_queue_stats_mapping() with is_rx = STAT_QMAP_TX.
 */
int
rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
		uint8_t stat_idx)
{
	return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
			STAT_QMAP_TX);
}
1135
1136
/**
 * Map an RX queue of a port onto a statistics counter index.
 * Thin wrapper around set_queue_stats_mapping() with is_rx = STAT_QMAP_RX.
 */
int
rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
		uint8_t stat_idx)
{
	return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
			STAT_QMAP_RX);
}
1144
1145
1146 void
1147 rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
1148 {
1149         struct rte_eth_dev *dev;
1150
1151         if (port_id >= nb_ports) {
1152                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1153                 return;
1154         }
1155         dev = &rte_eth_devices[port_id];
1156
1157         /* Default device offload capabilities to zero */
1158         dev_info->rx_offload_capa = 0;
1159         dev_info->tx_offload_capa = 0;
1160         dev_info->if_index = 0;
1161         FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
1162         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
1163         dev_info->pci_dev = dev->pci_dev;
1164         if (dev->driver)
1165                 dev_info->driver_name = dev->driver->pci_drv.name;
1166 }
1167
1168 void
1169 rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
1170 {
1171         struct rte_eth_dev *dev;
1172
1173         if (port_id >= nb_ports) {
1174                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1175                 return;
1176         }
1177         dev = &rte_eth_devices[port_id];
1178         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
1179 }
1180
1181 int
1182 rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
1183 {
1184         struct rte_eth_dev *dev;
1185
1186         if (port_id >= nb_ports) {
1187                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1188                 return (-ENODEV);
1189         }
1190         dev = &rte_eth_devices[port_id];
1191         if (! (dev->data->dev_conf.rxmode.hw_vlan_filter)) {
1192                 PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
1193                 return (-ENOSYS);
1194         }
1195
1196         if (vlan_id > 4095) {
1197                 PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
1198                                 port_id, (unsigned) vlan_id);
1199                 return (-EINVAL);
1200         }
1201         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
1202         (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
1203         return (0);
1204 }
1205
1206 int
1207 rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on)
1208 {
1209         struct rte_eth_dev *dev;
1210
1211         if (port_id >= nb_ports) {
1212                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1213                 return (-ENODEV);
1214         }
1215
1216         dev = &rte_eth_devices[port_id];
1217         if (rx_queue_id >= dev->data->nb_rx_queues) {
1218                 PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", port_id);
1219                 return (-EINVAL);
1220         }
1221
1222         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
1223         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
1224
1225         return (0);
1226 }
1227
1228 int
1229 rte_eth_dev_set_vlan_ether_type(uint8_t port_id, uint16_t tpid)
1230 {
1231         struct rte_eth_dev *dev;
1232
1233         if (port_id >= nb_ports) {
1234                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1235                 return (-ENODEV);
1236         }
1237
1238         dev = &rte_eth_devices[port_id];
1239         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
1240         (*dev->dev_ops->vlan_tpid_set)(dev, tpid);
1241
1242         return (0);
1243 }
1244
1245 int
1246 rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
1247 {
1248         struct rte_eth_dev *dev;
1249         int ret = 0;
1250         int mask = 0;
1251         int cur, org = 0;
1252
1253         if (port_id >= nb_ports) {
1254                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1255                 return (-ENODEV);
1256         }
1257
1258         dev = &rte_eth_devices[port_id];
1259
1260         /*check which option changed by application*/
1261         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
1262         org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
1263         if (cur != org){
1264                 dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
1265                 mask |= ETH_VLAN_STRIP_MASK;
1266         }
1267
1268         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
1269         org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
1270         if (cur != org){
1271                 dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
1272                 mask |= ETH_VLAN_FILTER_MASK;
1273         }
1274
1275         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
1276         org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
1277         if (cur != org){
1278                 dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
1279                 mask |= ETH_VLAN_EXTEND_MASK;
1280         }
1281
1282         /*no change*/
1283         if(mask == 0)
1284                 return ret;
1285
1286         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
1287         (*dev->dev_ops->vlan_offload_set)(dev, mask);
1288
1289         return ret;
1290 }
1291
1292 int
1293 rte_eth_dev_get_vlan_offload(uint8_t port_id)
1294 {
1295         struct rte_eth_dev *dev;
1296         int ret = 0;
1297
1298         if (port_id >= nb_ports) {
1299                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1300                 return (-ENODEV);
1301         }
1302
1303         dev = &rte_eth_devices[port_id];
1304
1305         if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1306                 ret |= ETH_VLAN_STRIP_OFFLOAD ;
1307
1308         if (dev->data->dev_conf.rxmode.hw_vlan_filter)
1309                 ret |= ETH_VLAN_FILTER_OFFLOAD ;
1310
1311         if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1312                 ret |= ETH_VLAN_EXTEND_OFFLOAD ;
1313
1314         return ret;
1315 }
1316
1317
1318 int
1319 rte_eth_dev_fdir_add_signature_filter(uint8_t port_id,
1320                                       struct rte_fdir_filter *fdir_filter,
1321                                       uint8_t queue)
1322 {
1323         struct rte_eth_dev *dev;
1324
1325         if (port_id >= nb_ports) {
1326                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1327                 return (-ENODEV);
1328         }
1329
1330         dev = &rte_eth_devices[port_id];
1331
1332         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
1333                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1334                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1335                 return (-ENOSYS);
1336         }
1337
1338         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1339              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1340             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1341                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1342                                 "None l4type, source & destinations ports " \
1343                                 "should be null!\n");
1344                 return (-EINVAL);
1345         }
1346
1347         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_signature_filter, -ENOTSUP);
1348         return (*dev->dev_ops->fdir_add_signature_filter)(dev, fdir_filter,
1349                                                                 queue);
1350 }
1351
1352 int
1353 rte_eth_dev_fdir_update_signature_filter(uint8_t port_id,
1354                                          struct rte_fdir_filter *fdir_filter,
1355                                          uint8_t queue)
1356 {
1357         struct rte_eth_dev *dev;
1358
1359         if (port_id >= nb_ports) {
1360                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1361                 return (-ENODEV);
1362         }
1363
1364         dev = &rte_eth_devices[port_id];
1365
1366         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
1367                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1368                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1369                 return (-ENOSYS);
1370         }
1371
1372         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1373              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1374             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1375                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1376                                 "None l4type, source & destinations ports " \
1377                                 "should be null!\n");
1378                 return (-EINVAL);
1379         }
1380
1381         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_signature_filter, -ENOTSUP);
1382         return (*dev->dev_ops->fdir_update_signature_filter)(dev, fdir_filter,
1383                                                                 queue);
1384
1385 }
1386
1387 int
1388 rte_eth_dev_fdir_remove_signature_filter(uint8_t port_id,
1389                                          struct rte_fdir_filter *fdir_filter)
1390 {
1391         struct rte_eth_dev *dev;
1392
1393         if (port_id >= nb_ports) {
1394                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1395                 return (-ENODEV);
1396         }
1397
1398         dev = &rte_eth_devices[port_id];
1399
1400         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
1401                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1402                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1403                 return (-ENOSYS);
1404         }
1405
1406         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1407              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1408             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1409                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1410                                 "None l4type source & destinations ports " \
1411                                 "should be null!\n");
1412                 return (-EINVAL);
1413         }
1414
1415         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_signature_filter, -ENOTSUP);
1416         return (*dev->dev_ops->fdir_remove_signature_filter)(dev, fdir_filter);
1417 }
1418
1419 int
1420 rte_eth_dev_fdir_get_infos(uint8_t port_id, struct rte_eth_fdir *fdir)
1421 {
1422         struct rte_eth_dev *dev;
1423
1424         if (port_id >= nb_ports) {
1425                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1426                 return (-ENODEV);
1427         }
1428
1429         dev = &rte_eth_devices[port_id];
1430         if (! (dev->data->dev_conf.fdir_conf.mode)) {
1431                 PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
1432                 return (-ENOSYS);
1433         }
1434
1435         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_infos_get, -ENOTSUP);
1436
1437         (*dev->dev_ops->fdir_infos_get)(dev, fdir);
1438         return (0);
1439 }
1440
1441 int
1442 rte_eth_dev_fdir_add_perfect_filter(uint8_t port_id,
1443                                     struct rte_fdir_filter *fdir_filter,
1444                                     uint16_t soft_id, uint8_t queue,
1445                                     uint8_t drop)
1446 {
1447         struct rte_eth_dev *dev;
1448
1449         if (port_id >= nb_ports) {
1450                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1451                 return (-ENODEV);
1452         }
1453
1454         dev = &rte_eth_devices[port_id];
1455
1456         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
1457                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1458                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1459                 return (-ENOSYS);
1460         }
1461
1462         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1463              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1464             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1465                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1466                                 "None l4type, source & destinations ports " \
1467                                 "should be null!\n");
1468                 return (-EINVAL);
1469         }
1470
1471         /* For now IPv6 is not supported with perfect filter */
1472         if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
1473                 return (-ENOTSUP);
1474
1475         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_perfect_filter, -ENOTSUP);
1476         return (*dev->dev_ops->fdir_add_perfect_filter)(dev, fdir_filter,
1477                                                                 soft_id, queue,
1478                                                                 drop);
1479 }
1480
1481 int
1482 rte_eth_dev_fdir_update_perfect_filter(uint8_t port_id,
1483                                        struct rte_fdir_filter *fdir_filter,
1484                                        uint16_t soft_id, uint8_t queue,
1485                                        uint8_t drop)
1486 {
1487         struct rte_eth_dev *dev;
1488
1489         if (port_id >= nb_ports) {
1490                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1491                 return (-ENODEV);
1492         }
1493
1494         dev = &rte_eth_devices[port_id];
1495
1496         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
1497                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1498                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1499                 return (-ENOSYS);
1500         }
1501
1502         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1503              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1504             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1505                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1506                                 "None l4type, source & destinations ports " \
1507                                 "should be null!\n");
1508                 return (-EINVAL);
1509         }
1510
1511         /* For now IPv6 is not supported with perfect filter */
1512         if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
1513                 return (-ENOTSUP);
1514
1515         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_perfect_filter, -ENOTSUP);
1516         return (*dev->dev_ops->fdir_update_perfect_filter)(dev, fdir_filter,
1517                                                         soft_id, queue, drop);
1518 }
1519
1520 int
1521 rte_eth_dev_fdir_remove_perfect_filter(uint8_t port_id,
1522                                        struct rte_fdir_filter *fdir_filter,
1523                                        uint16_t soft_id)
1524 {
1525         struct rte_eth_dev *dev;
1526
1527         if (port_id >= nb_ports) {
1528                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1529                 return (-ENODEV);
1530         }
1531
1532         dev = &rte_eth_devices[port_id];
1533
1534         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
1535                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1536                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1537                 return (-ENOSYS);
1538         }
1539
1540         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1541              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1542             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1543                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1544                                 "None l4type, source & destinations ports " \
1545                                 "should be null!\n");
1546                 return (-EINVAL);
1547         }
1548
1549         /* For now IPv6 is not supported with perfect filter */
1550         if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
1551                 return (-ENOTSUP);
1552
1553         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_perfect_filter, -ENOTSUP);
1554         return (*dev->dev_ops->fdir_remove_perfect_filter)(dev, fdir_filter,
1555                                                                 soft_id);
1556 }
1557
1558 int
1559 rte_eth_dev_fdir_set_masks(uint8_t port_id, struct rte_fdir_masks *fdir_mask)
1560 {
1561         struct rte_eth_dev *dev;
1562
1563         if (port_id >= nb_ports) {
1564                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1565                 return (-ENODEV);
1566         }
1567
1568         dev = &rte_eth_devices[port_id];
1569         if (! (dev->data->dev_conf.fdir_conf.mode)) {
1570                 PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
1571                 return (-ENOSYS);
1572         }
1573
1574         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_set_masks, -ENOTSUP);
1575         return (*dev->dev_ops->fdir_set_masks)(dev, fdir_mask);
1576 }
1577
1578 int
1579 rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
1580 {
1581         struct rte_eth_dev *dev;
1582
1583         if (port_id >= nb_ports) {
1584                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1585                 return (-ENODEV);
1586         }
1587
1588         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
1589                 PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
1590                 return (-EINVAL);
1591         }
1592
1593         dev = &rte_eth_devices[port_id];
1594         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
1595         return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
1596 }
1597
1598 int
1599 rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc_conf)
1600 {
1601         struct rte_eth_dev *dev;
1602
1603         if (port_id >= nb_ports) {
1604                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1605                 return (-ENODEV);
1606         }
1607
1608         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
1609                 PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
1610                 return (-EINVAL);
1611         }
1612
1613         dev = &rte_eth_devices[port_id];
1614         /* High water, low water validation are device specific */
1615         if  (*dev->dev_ops->priority_flow_ctrl_set)
1616                 return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
1617         return (-ENOTSUP);
1618 }
1619
1620 int
1621 rte_eth_dev_rss_reta_update(uint8_t port_id, struct rte_eth_rss_reta *reta_conf)
1622 {
1623         struct rte_eth_dev *dev;
1624         uint16_t max_rxq;
1625         uint8_t i,j;
1626
1627         if (port_id >= nb_ports) {
1628                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1629                 return (-ENODEV);
1630         }
1631
1632         /* Invalid mask bit(s) setting */
1633         if ((reta_conf->mask_lo == 0) && (reta_conf->mask_hi == 0)) {
1634                 PMD_DEBUG_TRACE("Invalid update mask bits for port=%d\n",port_id);
1635                 return (-EINVAL);
1636         }
1637
1638         dev = &rte_eth_devices[port_id];
1639         max_rxq = (dev->data->nb_rx_queues <= ETH_RSS_RETA_MAX_QUEUE) ?
1640                 dev->data->nb_rx_queues : ETH_RSS_RETA_MAX_QUEUE;
1641         if (reta_conf->mask_lo != 0) {
1642                 for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES/2; i++) {
1643                         if ((reta_conf->mask_lo & (1ULL << i)) &&
1644                                 (reta_conf->reta[i] >= max_rxq)) {
1645                                 PMD_DEBUG_TRACE("RETA hash index output"
1646                                         "configration for port=%d,invalid"
1647                                         "queue=%d\n",port_id,reta_conf->reta[i]);
1648
1649                                 return (-EINVAL);
1650                         }
1651                 }
1652         }
1653
1654         if (reta_conf->mask_hi != 0) {
1655                 for (i = 0; i< ETH_RSS_RETA_NUM_ENTRIES/2; i++) {
1656                         j = (uint8_t)(i + ETH_RSS_RETA_NUM_ENTRIES/2);
1657
1658                         /* Check if the max entry >= 128 */
1659                         if ((reta_conf->mask_hi & (1ULL << i)) &&
1660                                 (reta_conf->reta[j] >= max_rxq)) {
1661                                 PMD_DEBUG_TRACE("RETA hash index output"
1662                                         "configration for port=%d,invalid"
1663                                         "queue=%d\n",port_id,reta_conf->reta[j]);
1664
1665                                 return (-EINVAL);
1666                         }
1667                 }
1668         }
1669
1670         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
1671         return (*dev->dev_ops->reta_update)(dev, reta_conf);
1672 }
1673
1674 int
1675 rte_eth_dev_rss_reta_query(uint8_t port_id, struct rte_eth_rss_reta *reta_conf)
1676 {
1677         struct rte_eth_dev *dev;
1678
1679         if (port_id >= nb_ports) {
1680                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1681                 return (-ENODEV);
1682         }
1683
1684         if((reta_conf->mask_lo == 0) && (reta_conf->mask_hi == 0)) {
1685                 PMD_DEBUG_TRACE("Invalid update mask bits for the port=%d\n",port_id);
1686                 return (-EINVAL);
1687         }
1688
1689         dev = &rte_eth_devices[port_id];
1690         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
1691         return (*dev->dev_ops->reta_query)(dev, reta_conf);
1692 }
1693
1694 int
1695 rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf)
1696 {
1697         struct rte_eth_dev *dev;
1698         uint16_t rss_hash_protos;
1699
1700         if (port_id >= nb_ports) {
1701                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1702                 return (-ENODEV);
1703         }
1704         rss_hash_protos = rss_conf->rss_hf;
1705         if ((rss_hash_protos != 0) &&
1706             ((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) {
1707                 PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%x\n",
1708                                 rss_hash_protos);
1709                 return (-EINVAL);
1710         }
1711         dev = &rte_eth_devices[port_id];
1712         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
1713         return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
1714 }
1715
1716 int
1717 rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
1718                               struct rte_eth_rss_conf *rss_conf)
1719 {
1720         struct rte_eth_dev *dev;
1721
1722         if (port_id >= nb_ports) {
1723                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1724                 return (-ENODEV);
1725         }
1726         dev = &rte_eth_devices[port_id];
1727         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
1728         return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
1729 }
1730
1731 int
1732 rte_eth_led_on(uint8_t port_id)
1733 {
1734         struct rte_eth_dev *dev;
1735
1736         if (port_id >= nb_ports) {
1737                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1738                 return (-ENODEV);
1739         }
1740
1741         dev = &rte_eth_devices[port_id];
1742         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
1743         return ((*dev->dev_ops->dev_led_on)(dev));
1744 }
1745
1746 int
1747 rte_eth_led_off(uint8_t port_id)
1748 {
1749         struct rte_eth_dev *dev;
1750
1751         if (port_id >= nb_ports) {
1752                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1753                 return (-ENODEV);
1754         }
1755
1756         dev = &rte_eth_devices[port_id];
1757         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
1758         return ((*dev->dev_ops->dev_led_off)(dev));
1759 }
1760
1761 /*
1762  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
1763  * an empty spot.
1764  */
1765 static inline int
1766 get_mac_addr_index(uint8_t port_id, struct ether_addr *addr)
1767 {
1768         struct rte_eth_dev_info dev_info;
1769         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1770         unsigned i;
1771
1772         rte_eth_dev_info_get(port_id, &dev_info);
1773
1774         for (i = 0; i < dev_info.max_mac_addrs; i++)
1775                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
1776                         return i;
1777
1778         return -1;
1779 }
1780
/* All-zero MAC address: marks an unused slot in the per-port address arrays. */
static struct ether_addr null_mac_addr = {{0, 0, 0, 0, 0, 0}};
1782
/*
 * Add a MAC address to a port, associated with VMDq pool `pool`.
 *
 * If the address is already present, only the pool bitmap is extended
 * (no-op when the pool bit is already set); otherwise the address is
 * written into the first free slot of the per-port address array.
 *
 * Returns 0 on success, -ENODEV for an unknown port, -ENOTSUP if the
 * driver lacks the op, -EINVAL for a zero address or out-of-range pool,
 * -ENOSPC when the address array is full.
 */
int
rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
			uint32_t pool)
{
	struct rte_eth_dev *dev;
	int index;
	uint64_t pool_mask;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-ENODEV);
	}
	dev = &rte_eth_devices[port_id];
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);

	/* The all-zero address marks free slots, so it cannot be added. */
	if (is_zero_ether_addr(addr)) {
		PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
			port_id);
		return (-EINVAL);
	}
	if (pool >= ETH_64_POOLS) {
		PMD_DEBUG_TRACE("pool id must be 0-%d\n",ETH_64_POOLS - 1);
		return (-EINVAL);
	}

	/* Reuse the slot if the address is already registered; otherwise
	 * claim the first free (all-zero) slot. */
	index = get_mac_addr_index(port_id, addr);
	if (index < 0) {
		index = get_mac_addr_index(port_id, &null_mac_addr);
		if (index < 0) {
			PMD_DEBUG_TRACE("port %d: MAC address array full\n",
				port_id);
			return (-ENOSPC);
		}
	} else {
		pool_mask = dev->data->mac_pool_sel[index];

		/* Check if both MAC address and pool is alread there, and do nothing */
		if (pool_mask & (1ULL << pool))
			return 0;
	}

	/* Update NIC */
	(*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);

	/* Update address in NIC data structure */
	ether_addr_copy(addr, &dev->data->mac_addrs[index]);

	/* Update pool bitmap in NIC data structure */
	dev->data->mac_pool_sel[index] |= (1ULL << pool);

	return 0;
}
1835
1836 int
1837 rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
1838 {
1839         struct rte_eth_dev *dev;
1840         int index;
1841
1842         if (port_id >= nb_ports) {
1843                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1844                 return (-ENODEV);
1845         }
1846         dev = &rte_eth_devices[port_id];
1847         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
1848
1849         index = get_mac_addr_index(port_id, addr);
1850         if (index == 0) {
1851                 PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
1852                 return (-EADDRINUSE);
1853         } else if (index < 0)
1854                 return 0;  /* Do nothing if address wasn't found */
1855
1856         /* Update NIC */
1857         (*dev->dev_ops->mac_addr_remove)(dev, index);
1858
1859         /* Update address in NIC data structure */
1860         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
1861
1862         return 0;
1863 }
1864
1865 int
1866 rte_eth_dev_set_vf_rxmode(uint8_t port_id,  uint16_t vf,
1867                                 uint16_t rx_mode, uint8_t on)
1868 {
1869         uint16_t num_vfs;
1870         struct rte_eth_dev *dev;
1871         struct rte_eth_dev_info dev_info;
1872
1873         if (port_id >= nb_ports) {
1874                 PMD_DEBUG_TRACE("set VF RX mode:Invalid port_id=%d\n",
1875                                 port_id);
1876                 return (-ENODEV);
1877         }
1878
1879         dev = &rte_eth_devices[port_id];
1880         rte_eth_dev_info_get(port_id, &dev_info);
1881
1882         num_vfs = dev_info.max_vfs;
1883         if (vf > num_vfs)
1884         {
1885                 PMD_DEBUG_TRACE("set VF RX mode:invalid VF id %d\n", vf);
1886                 return (-EINVAL);
1887         }
1888         if (rx_mode == 0)
1889         {
1890                 PMD_DEBUG_TRACE("set VF RX mode:mode mask ca not be zero\n");
1891                 return (-EINVAL);
1892         }
1893         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP);
1894         return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on);
1895 }
1896
1897 /*
1898  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
1899  * an empty spot.
1900  */
1901 static inline int
1902 get_hash_mac_addr_index(uint8_t port_id, struct ether_addr *addr)
1903 {
1904         struct rte_eth_dev_info dev_info;
1905         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1906         unsigned i;
1907
1908         rte_eth_dev_info_get(port_id, &dev_info);
1909         if (!dev->data->hash_mac_addrs)
1910                 return -1;
1911
1912         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
1913                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
1914                         ETHER_ADDR_LEN) == 0)
1915                         return i;
1916
1917         return -1;
1918 }
1919
/*
 * Add (@on != 0) or remove (@on == 0) a MAC address in the port's
 * unicast hash table (UTA).
 *
 * The shadow copy in dev->data->hash_mac_addrs is kept in sync with the
 * hardware: on success the address is stored in (or the all-zero free
 * marker restored to) the chosen slot.
 *
 * Returns 0 on success, -ENODEV for an unknown port, -EINVAL for a zero
 * address or removal of an absent address, -ENOSPC when the table is
 * full, -ENOTSUP if the driver lacks the op, or the driver's own error.
 */
int
rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
				uint8_t on)
{
	int index;
	int ret;
	struct rte_eth_dev *dev;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("unicast hash setting:Invalid port_id=%d\n",
			port_id);
		return (-ENODEV);
	}

	dev = &rte_eth_devices[port_id];
	/* The all-zero address marks free slots, so it cannot be managed. */
	if (is_zero_ether_addr(addr)) {
		PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
			port_id);
		return (-EINVAL);
	}

	index = get_hash_mac_addr_index(port_id, addr);
	/* Check if it's already there, and do nothing */
	if ((index >= 0) && (on))
		return 0;

	if (index < 0) {
		/* Removing an address that was never set is an error... */
		if (!on) {
			PMD_DEBUG_TRACE("port %d: the MAC address was not"
				"set in UTA\n", port_id);
			return (-EINVAL);
		}

		/* ...adding one needs a free (all-zero) slot. */
		index = get_hash_mac_addr_index(port_id, &null_mac_addr);
		if (index < 0) {
			PMD_DEBUG_TRACE("port %d: MAC address array full\n",
					port_id);
			return (-ENOSPC);
		}
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
	ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
	if (ret == 0) {
		/* Update address in NIC data structure */
		if (on)
			ether_addr_copy(addr,
					&dev->data->hash_mac_addrs[index]);
		else
			ether_addr_copy(&null_mac_addr,
					&dev->data->hash_mac_addrs[index]);
	}

	return ret;
}
1975
1976 int
1977 rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
1978 {
1979         struct rte_eth_dev *dev;
1980
1981         if (port_id >= nb_ports) {
1982                 PMD_DEBUG_TRACE("unicast hash setting:Invalid port_id=%d\n",
1983                         port_id);
1984                 return (-ENODEV);
1985         }
1986
1987         dev = &rte_eth_devices[port_id];
1988
1989         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
1990         return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
1991 }
1992
1993 int
1994 rte_eth_dev_set_vf_rx(uint8_t port_id,uint16_t vf, uint8_t on)
1995 {
1996         uint16_t num_vfs;
1997         struct rte_eth_dev *dev;
1998         struct rte_eth_dev_info dev_info;
1999
2000         if (port_id >= nb_ports) {
2001                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2002                 return (-ENODEV);
2003         }
2004
2005         dev = &rte_eth_devices[port_id];
2006         rte_eth_dev_info_get(port_id, &dev_info);
2007
2008         num_vfs = dev_info.max_vfs;
2009         if (vf > num_vfs)
2010         {
2011                 PMD_DEBUG_TRACE("port %d: invalid vf id\n", port_id);
2012                 return (-EINVAL);
2013         }
2014
2015         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP);
2016         return (*dev->dev_ops->set_vf_rx)(dev, vf,on);
2017 }
2018
2019 int
2020 rte_eth_dev_set_vf_tx(uint8_t port_id,uint16_t vf, uint8_t on)
2021 {
2022         uint16_t num_vfs;
2023         struct rte_eth_dev *dev;
2024         struct rte_eth_dev_info dev_info;
2025
2026         if (port_id >= nb_ports) {
2027                 PMD_DEBUG_TRACE("set pool tx:Invalid port_id=%d\n", port_id);
2028                 return (-ENODEV);
2029         }
2030
2031         dev = &rte_eth_devices[port_id];
2032         rte_eth_dev_info_get(port_id, &dev_info);
2033
2034         num_vfs = dev_info.max_vfs;
2035         if (vf > num_vfs)
2036         {
2037                 PMD_DEBUG_TRACE("set pool tx:invalid pool id=%d\n", vf);
2038                 return (-EINVAL);
2039         }
2040
2041         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP);
2042         return (*dev->dev_ops->set_vf_tx)(dev, vf,on);
2043 }
2044
2045 int
2046 rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id,
2047                                  uint64_t vf_mask,uint8_t vlan_on)
2048 {
2049         struct rte_eth_dev *dev;
2050
2051         if (port_id >= nb_ports) {
2052                 PMD_DEBUG_TRACE("VF VLAN filter:invalid port id=%d\n",
2053                                 port_id);
2054                 return (-ENODEV);
2055         }
2056         dev = &rte_eth_devices[port_id];
2057
2058         if(vlan_id > ETHER_MAX_VLAN_ID)
2059         {
2060                 PMD_DEBUG_TRACE("VF VLAN filter:invalid VLAN id=%d\n",
2061                         vlan_id);
2062                 return (-EINVAL);
2063         }
2064         if (vf_mask == 0)
2065         {
2066                 PMD_DEBUG_TRACE("VF VLAN filter:pool_mask can not be 0\n");
2067                 return (-EINVAL);
2068         }
2069
2070         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP);
2071         return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id,
2072                                                 vf_mask,vlan_on);
2073 }
2074
2075 int
2076 rte_eth_mirror_rule_set(uint8_t port_id,
2077                         struct rte_eth_vmdq_mirror_conf *mirror_conf,
2078                         uint8_t rule_id, uint8_t on)
2079 {
2080         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2081
2082         if (port_id >= nb_ports) {
2083                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2084                 return (-ENODEV);
2085         }
2086
2087         if (mirror_conf->rule_type_mask == 0) {
2088                 PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
2089                 return (-EINVAL);
2090         }
2091
2092         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
2093                 PMD_DEBUG_TRACE("Invalid dst pool, pool id must"
2094                         "be 0-%d\n",ETH_64_POOLS - 1);
2095                 return (-EINVAL);
2096         }
2097
2098         if ((mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) &&
2099                 (mirror_conf->pool_mask == 0)) {
2100                 PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not"
2101                                 "be 0.\n");
2102                 return (-EINVAL);
2103         }
2104
2105         if(rule_id >= ETH_VMDQ_NUM_MIRROR_RULE)
2106         {
2107                 PMD_DEBUG_TRACE("Invalid rule_id, rule_id must be 0-%d\n",
2108                         ETH_VMDQ_NUM_MIRROR_RULE - 1);
2109                 return (-EINVAL);
2110         }
2111
2112         dev = &rte_eth_devices[port_id];
2113         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
2114
2115         return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
2116 }
2117
2118 int
2119 rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
2120 {
2121         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2122
2123         if (port_id >= nb_ports) {
2124                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2125                 return (-ENODEV);
2126         }
2127
2128         if(rule_id >= ETH_VMDQ_NUM_MIRROR_RULE)
2129         {
2130                 PMD_DEBUG_TRACE("Invalid rule_id, rule_id must be 0-%d\n",
2131                         ETH_VMDQ_NUM_MIRROR_RULE-1);
2132                 return (-EINVAL);
2133         }
2134
2135         dev = &rte_eth_devices[port_id];
2136         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
2137
2138         return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
2139 }
2140
2141 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
/*
 * Debug (argument-checking) variant of the RX burst entry point: validates
 * port and queue ids before dispatching to the PMD's rx_pkt_burst.
 * Returns the number of packets retrieved, 0 on any validation failure.
 */
uint16_t
rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
		 struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct rte_eth_dev *dev;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return 0;
	}
	dev = &rte_eth_devices[port_id];
	/* NOTE(review): FUNC_PTR_OR_ERR_RET returns -ENOTSUP here, which a
	 * uint16_t return truncates to a large positive packet count —
	 * confirm rx_pkt_burst is always set once a device is usable. */
	FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, -ENOTSUP);
	if (queue_id >= dev->data->nb_rx_queues) {
		PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
		return 0;
	}
	return (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
						rx_pkts, nb_pkts);
}
2161
/*
 * Debug (argument-checking) variant of the TX burst entry point: validates
 * port and queue ids before dispatching to the PMD's tx_pkt_burst.
 * Returns the number of packets actually transmitted, 0 on any
 * validation failure.
 */
uint16_t
rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
		 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct rte_eth_dev *dev;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return 0;
	}
	dev = &rte_eth_devices[port_id];

	/* NOTE(review): FUNC_PTR_OR_ERR_RET returns -ENOTSUP here, which a
	 * uint16_t return truncates to a large positive packet count —
	 * confirm tx_pkt_burst is always set once a device is usable. */
	FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, -ENOTSUP);
	if (queue_id >= dev->data->nb_tx_queues) {
		PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
		return 0;
	}
	return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id],
						tx_pkts, nb_pkts);
}
2182
/*
 * Debug variant: return the number of used descriptors of an RX queue.
 * Returns 0 for an invalid port.
 */
uint32_t
rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
{
	struct rte_eth_dev *dev;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return 0;
	}
	dev = &rte_eth_devices[port_id];
	/* NOTE(review): unlike rx_burst, queue_id is passed through without
	 * a bounds check, and the macro's -ENOTSUP becomes a huge uint32_t
	 * value — confirm callers treat the result as best-effort only. */
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, -ENOTSUP);
	return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
}
2196
/*
 * Debug variant: check whether the RX descriptor at `offset` in the given
 * queue has been filled by hardware.
 * Returns the driver's result, or -ENODEV/-ENOTSUP on dispatch failure.
 */
int
rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
{
	struct rte_eth_dev *dev;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-ENODEV);
	}
	dev = &rte_eth_devices[port_id];
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
	/* NOTE(review): queue_id is not validated against nb_rx_queues
	 * before indexing rx_queues[] — confirm callers guarantee it. */
	return (*dev->dev_ops->rx_descriptor_done)( \
		dev->data->rx_queues[queue_id], offset);
}
2211 #endif
2212
/*
 * Register a user callback for `event` on `port_id`.
 *
 * An existing registration with identical (cb_fn, cb_arg, event) is
 * reused; otherwise a new entry is allocated and appended to the
 * device's callback list.  The list is protected by rte_eth_dev_cb_lock.
 *
 * Returns 0 on success, -EINVAL for a NULL callback or bad port,
 * -ENOMEM when a new entry could not be allocated.
 */
int
rte_eth_dev_callback_register(uint8_t port_id,
			enum rte_eth_event_type event,
			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_callback *user_cb;

	if (!cb_fn)
		return (-EINVAL);
	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-EINVAL);
	}

	dev = &rte_eth_devices[port_id];
	rte_spinlock_lock(&rte_eth_dev_cb_lock);

	/* Search for an existing registration with the same fn/arg/event;
	 * user_cb is non-NULL after the loop iff one was found. */
	TAILQ_FOREACH(user_cb, &(dev->callbacks), next) {
		if (user_cb->cb_fn == cb_fn &&
			user_cb->cb_arg == cb_arg &&
			user_cb->event == event) {
			break;
		}
	}

	/* create a new callback. */
	if (user_cb == NULL && (user_cb = rte_zmalloc("INTR_USER_CALLBACK",
			sizeof(struct rte_eth_dev_callback), 0)) != NULL) {
		user_cb->cb_fn = cb_fn;
		user_cb->cb_arg = cb_arg;
		user_cb->event = event;
		TAILQ_INSERT_TAIL(&(dev->callbacks), user_cb, next);
	}

	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
	/* user_cb is NULL only if both lookup and allocation failed. */
	return ((user_cb == NULL) ? -ENOMEM : 0);
}
2251
/*
 * Unregister callbacks on `port_id` matching (cb_fn, event).  A stored
 * cb_arg of (void *)-1 matches any cb_arg; otherwise the stored cb_arg
 * must equal the one passed in.
 *
 * Entries currently executing (active != 0) are not removed; in that
 * case -EAGAIN is returned so the caller can retry.  Returns 0 when all
 * matches were removed, -EINVAL for a NULL callback or bad port.
 */
int
rte_eth_dev_callback_unregister(uint8_t port_id,
			enum rte_eth_event_type event,
			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_callback *cb, *next;

	if (!cb_fn)
		return (-EINVAL);
	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-EINVAL);
	}

	dev = &rte_eth_devices[port_id];
	rte_spinlock_lock(&rte_eth_dev_cb_lock);

	ret = 0;
	/* `next` is saved up front because the current node may be freed. */
	for (cb = TAILQ_FIRST(&dev->callbacks); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb->cb_arg != (void *)-1 &&
				cb->cb_arg != cb_arg))
			continue;

		/*
		 * if this callback is not executing right now,
		 * then remove it.
		 */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->callbacks), cb, next);
			rte_free(cb);
		} else {
			ret = -EAGAIN;
		}
	}

	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
	return (ret);
}
2296
/*
 * Invoke all callbacks registered for `event` on `dev`.
 *
 * Each matching entry is copied to a local, marked active, and the lock
 * is dropped while the user callback runs — so callbacks may call back
 * into the ethdev API without deadlocking.  The `active` flag lets
 * rte_eth_dev_callback_unregister() detect in-flight callbacks.
 */
void
_rte_eth_dev_callback_process(struct rte_eth_dev *dev,
	enum rte_eth_event_type event)
{
	struct rte_eth_dev_callback *cb_lst;
	struct rte_eth_dev_callback dev_cb;

	rte_spinlock_lock(&rte_eth_dev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->callbacks), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		/* Copy the entry so it can be used after the lock is
		 * released; mark it active while the user code runs. */
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		rte_spinlock_unlock(&rte_eth_dev_cb_lock);
		dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
						dev_cb.cb_arg);
		rte_spinlock_lock(&rte_eth_dev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
}
2318 #ifdef RTE_NIC_BYPASS
2319 int rte_eth_dev_bypass_init(uint8_t port_id)
2320 {
2321         struct rte_eth_dev *dev;
2322
2323         if (port_id >= nb_ports) {
2324                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2325                 return (-ENODEV);
2326         }
2327
2328         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2329                 PMD_DEBUG_TRACE("Invalid port device\n");
2330                 return (-ENODEV);
2331         }
2332
2333         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP);
2334         (*dev->dev_ops->bypass_init)(dev);
2335         return 0;
2336 }
2337
2338 int
2339 rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state)
2340 {
2341         struct rte_eth_dev *dev;
2342
2343         if (port_id >= nb_ports) {
2344                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2345                 return (-ENODEV);
2346         }
2347
2348         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2349                 PMD_DEBUG_TRACE("Invalid port device\n");
2350                 return (-ENODEV);
2351         }
2352         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
2353         (*dev->dev_ops->bypass_state_show)(dev, state);
2354         return 0;
2355 }
2356
2357 int
2358 rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state)
2359 {
2360         struct rte_eth_dev *dev;
2361
2362         if (port_id >= nb_ports) {
2363                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2364                 return (-ENODEV);
2365         }
2366
2367         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2368                 PMD_DEBUG_TRACE("Invalid port device\n");
2369                 return (-ENODEV);
2370         }
2371
2372         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP);
2373         (*dev->dev_ops->bypass_state_set)(dev, new_state);
2374         return 0;
2375 }
2376
2377 int
2378 rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state)
2379 {
2380         struct rte_eth_dev *dev;
2381
2382         if (port_id >= nb_ports) {
2383                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2384                 return (-ENODEV);
2385         }
2386
2387         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2388                 PMD_DEBUG_TRACE("Invalid port device\n");
2389                 return (-ENODEV);
2390         }
2391
2392         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
2393         (*dev->dev_ops->bypass_event_show)(dev, event, state);
2394         return 0;
2395 }
2396
2397 int
2398 rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state)
2399 {
2400         struct rte_eth_dev *dev;
2401
2402         if (port_id >= nb_ports) {
2403                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2404                 return (-ENODEV);
2405         }
2406
2407         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2408                 PMD_DEBUG_TRACE("Invalid port device\n");
2409                 return (-ENODEV);
2410         }
2411
2412         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP);
2413         (*dev->dev_ops->bypass_event_set)(dev, event, state);
2414         return 0;
2415 }
2416
2417 int
2418 rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout)
2419 {
2420         struct rte_eth_dev *dev;
2421
2422         if (port_id >= nb_ports) {
2423                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2424                 return (-ENODEV);
2425         }
2426
2427         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2428                 PMD_DEBUG_TRACE("Invalid port device\n");
2429                 return (-ENODEV);
2430         }
2431
2432         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP);
2433         (*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout);
2434         return 0;
2435 }
2436
2437 int
2438 rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver)
2439 {
2440         struct rte_eth_dev *dev;
2441
2442         if (port_id >= nb_ports) {
2443                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2444                 return (-ENODEV);
2445         }
2446
2447         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2448                 PMD_DEBUG_TRACE("Invalid port device\n");
2449                 return (-ENODEV);
2450         }
2451
2452         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP);
2453         (*dev->dev_ops->bypass_ver_show)(dev, ver);
2454         return 0;
2455 }
2456
2457 int
2458 rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
2459 {
2460         struct rte_eth_dev *dev;
2461
2462         if (port_id >= nb_ports) {
2463                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2464                 return (-ENODEV);
2465         }
2466
2467         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2468                 PMD_DEBUG_TRACE("Invalid port device\n");
2469                 return (-ENODEV);
2470         }
2471
2472         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP);
2473         (*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout);
2474         return 0;
2475 }
2476
2477 int
2478 rte_eth_dev_bypass_wd_reset(uint8_t port_id)
2479 {
2480         struct rte_eth_dev *dev;
2481
2482         if (port_id >= nb_ports) {
2483                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2484                 return (-ENODEV);
2485         }
2486
2487         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2488                 PMD_DEBUG_TRACE("Invalid port device\n");
2489                 return (-ENODEV);
2490         }
2491
2492         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP);
2493         (*dev->dev_ops->bypass_wd_reset)(dev);
2494         return 0;
2495 }
2496 #endif