ethdev: allow to get RSS hash functions and key
[dpdk.git] / lib / librte_ether / rte_ethdev.c
1 /*-
2  *   BSD LICENSE
3  * 
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  * 
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  * 
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  * 
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/types.h>
35 #include <sys/queue.h>
36 #include <ctype.h>
37 #include <stdio.h>
38 #include <stdlib.h>
39 #include <string.h>
40 #include <stdarg.h>
41 #include <errno.h>
42 #include <stdint.h>
43 #include <inttypes.h>
44
45 #include <rte_byteorder.h>
46 #include <rte_log.h>
47 #include <rte_debug.h>
48 #include <rte_interrupts.h>
49 #include <rte_pci.h>
50 #include <rte_memory.h>
51 #include <rte_memcpy.h>
52 #include <rte_memzone.h>
53 #include <rte_launch.h>
54 #include <rte_tailq.h>
55 #include <rte_eal.h>
56 #include <rte_per_lcore.h>
57 #include <rte_lcore.h>
58 #include <rte_atomic.h>
59 #include <rte_branch_prediction.h>
60 #include <rte_common.h>
61 #include <rte_ring.h>
62 #include <rte_mempool.h>
63 #include <rte_malloc.h>
64 #include <rte_mbuf.h>
65 #include <rte_errno.h>
66 #include <rte_spinlock.h>
67
68 #include "rte_ether.h"
69 #include "rte_ethdev.h"
70
71 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
72 #define PMD_DEBUG_TRACE(fmt, args...) do {                        \
73                 RTE_LOG(ERR, PMD, "%s: " fmt, __func__, ## args); \
74         } while (0)
75 #else
76 #define PMD_DEBUG_TRACE(fmt, args...)
77 #endif
78
79 /* Macros for restricting functions to the primary process only */
80 #define PROC_PRIMARY_OR_ERR_RET(retval) do { \
81         if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
82                 PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
83                 return (retval); \
84         } \
85 } while(0)
86 #define PROC_PRIMARY_OR_RET() do { \
87         if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
88                 PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
89                 return; \
90         } \
91 } while(0)
92
93 /* Macros to check for invalid function pointers in the dev_ops structure */
94 #define FUNC_PTR_OR_ERR_RET(func, retval) do { \
95         if ((func) == NULL) { \
96                 PMD_DEBUG_TRACE("Function not supported\n"); \
97                 return (retval); \
98         } \
99 } while(0)
100 #define FUNC_PTR_OR_RET(func) do { \
101         if ((func) == NULL) { \
102                 PMD_DEBUG_TRACE("Function not supported\n"); \
103                 return; \
104         } \
105 } while(0)
106
107 static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
108 struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
109 static struct rte_eth_dev_data *rte_eth_dev_data = NULL;
110 static uint8_t nb_ports = 0;
111
112 /* spinlock for eth device callbacks */
113 static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
114
115 /**
116  * The user application callback description.
117  *
118  * It contains callback address to be registered by user application,
119  * the pointer to the parameters for callback, and the event type.
120  */
121 struct rte_eth_dev_callback {
122         TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
123         rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
124         void *cb_arg;                           /**< Parameter for callback */
125         enum rte_eth_event_type event;          /**< Interrupt event type */
126         uint32_t active;                        /**< Callback is executing */
127 };
128
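/*
 * Illustrative sketch, not part of the original file: an application-level
 * handler matching rte_eth_dev_cb_fn, registered for link-state-change
 * interrupts through rte_eth_dev_callback_register().  The "example_"
 * names are hypothetical.
 */
static void
example_lsc_handler(uint8_t port_id, enum rte_eth_event_type event,
                    void *cb_arg)
{
        RTE_SET_USED(cb_arg);
        printf("port %u: received event %d\n", (unsigned)port_id, (int)event);
}

static int
example_register_lsc_handler(uint8_t port_id)
{
        /* the handler runs later, from the interrupt thread */
        return rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
                                             example_lsc_handler, NULL);
}
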
129 enum {
130         STAT_QMAP_TX = 0,
131         STAT_QMAP_RX
132 };
133
134 static inline void
135 rte_eth_dev_data_alloc(void)
136 {
137         const unsigned flags = 0;
138         const struct rte_memzone *mz;
139
140         if (rte_eal_process_type() == RTE_PROC_PRIMARY){
141                 mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
142                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
143                                 rte_socket_id(), flags);
144         } else
145                 mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
146         if (mz == NULL)
147                 rte_panic("Cannot allocate memzone for ethernet port data\n");
148
149         rte_eth_dev_data = mz->addr;
150         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
151                 memset(rte_eth_dev_data, 0,
152                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
153 }
154
155 struct rte_eth_dev *
156 rte_eth_dev_allocate(void)
157 {
158         struct rte_eth_dev *eth_dev;
159
160         if (nb_ports == RTE_MAX_ETHPORTS) {
161                 PMD_DEBUG_TRACE("Reached maximum number of ethernet ports\n");
162                 return NULL;
163         }
164
165         if (rte_eth_dev_data == NULL)
166                 rte_eth_dev_data_alloc();
167
168         eth_dev = &rte_eth_devices[nb_ports];
169         eth_dev->data = &rte_eth_dev_data[nb_ports];
170         eth_dev->data->port_id = nb_ports++;
171         return eth_dev;
172 }
173
174 static int
175 rte_eth_dev_init(struct rte_pci_driver *pci_drv,
176                  struct rte_pci_device *pci_dev)
177 {
178         struct eth_driver    *eth_drv;
179         struct rte_eth_dev *eth_dev;
180         int diag;
181
182         eth_drv = (struct eth_driver *)pci_drv;
183
184         eth_dev = rte_eth_dev_allocate();
185         if (eth_dev == NULL)
186                 return -ENOMEM;
187
188         if (rte_eal_process_type() == RTE_PROC_PRIMARY){
189                 eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
190                                   eth_drv->dev_private_size,
191                                   CACHE_LINE_SIZE);
192                 if (eth_dev->data->dev_private == NULL)
193                         rte_panic("Cannot allocate memzone for private port data\n");
194         }
195         eth_dev->pci_dev = pci_dev;
196         eth_dev->driver = eth_drv;
197         eth_dev->data->rx_mbuf_alloc_failed = 0;
198
199         /* init user callbacks */
200         TAILQ_INIT(&(eth_dev->callbacks));
201
202         /*
203          * Set the default maximum frame size.
204          */
205         eth_dev->data->max_frame_size = ETHER_MAX_LEN;
206
207         /* Invoke PMD device initialization function */
208         diag = (*eth_drv->eth_dev_init)(eth_drv, eth_dev);
209         if (diag == 0)
210                 return (0);
211
212         PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%x device_id=0x%x)"
213                         " failed\n", pci_drv->name,
214                         (unsigned) pci_dev->id.vendor_id,
215                         (unsigned) pci_dev->id.device_id);
216         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
217                 rte_free(eth_dev->data->dev_private);
218         nb_ports--;
219         return diag;
220 }
221
222 /**
223  * Register an Ethernet [Poll Mode] driver.
224  *
225  * Function invoked by the initialization function of an Ethernet driver
226  * to simultaneously register itself as a PCI driver and as an Ethernet
227  * Poll Mode Driver.
228  * Invokes the rte_eal_pci_register() function to register the *pci_drv*
229  * structure embedded in the *eth_drv* structure, after having stored the
230  * address of the rte_eth_dev_init() function in the *devinit* field of
231  * the *pci_drv* structure.
232  * During the PCI probing phase, the rte_eth_dev_init() function is
233  * invoked for each PCI [Ethernet device] matching the embedded PCI
234  * identifiers provided by the driver.
235  */
236 void
237 rte_eth_driver_register(struct eth_driver *eth_drv)
238 {
239         eth_drv->pci_drv.devinit = rte_eth_dev_init;
240         rte_eal_pci_register(&eth_drv->pci_drv);
241 }
242
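/*
 * Illustrative sketch, not part of the original file: the skeleton a PMD
 * might use with rte_eth_driver_register().  All "example_" names are
 * hypothetical, and a real driver would also fill in a PCI id table and
 * its dev_ops in the init function.
 */
struct example_adapter {
        uint32_t state; /* hypothetical per-port private data */
};

static int
example_eth_dev_init(struct eth_driver *eth_drv, struct rte_eth_dev *eth_dev)
{
        RTE_SET_USED(eth_drv);
        /* point eth_dev->dev_ops at the driver's ops table, read the
         * MAC address, and so on */
        RTE_SET_USED(eth_dev);
        return 0;
}

static struct eth_driver example_pmd_driver = {
        .pci_drv = {
                .name = "example_pmd",
                /* .id_table would list the supported PCI ids */
        },
        .eth_dev_init = example_eth_dev_init,
        .dev_private_size = sizeof(struct example_adapter),
};

void
example_pmd_register(void)
{
        /* registers the PCI driver; example_eth_dev_init() runs at probe */
        rte_eth_driver_register(&example_pmd_driver);
}
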
243 int
244 rte_eth_dev_socket_id(uint8_t port_id)
245 {
246         if (port_id >= nb_ports)
247                 return -1;
248         return rte_eth_devices[port_id].pci_dev->numa_node;
249 }
250
251 uint8_t
252 rte_eth_dev_count(void)
253 {
254         return (nb_ports);
255 }
256
257 static int
258 rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
259 {
260         uint16_t old_nb_queues = dev->data->nb_rx_queues;
261         void **rxq;
262         unsigned i;
263
264         if (dev->data->rx_queues == NULL) { /* first time configuration */
265                 dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
266                                 sizeof(dev->data->rx_queues[0]) * nb_queues,
267                                 CACHE_LINE_SIZE);
268                 if (dev->data->rx_queues == NULL) {
269                         dev->data->nb_rx_queues = 0;
270                         return -(ENOMEM);
271                 }
272         } else { /* re-configure */
273                 FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
274
275                 rxq = dev->data->rx_queues;
276
277                 for (i = nb_queues; i < old_nb_queues; i++)
278                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
279                 rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
280                                 CACHE_LINE_SIZE);
281                 if (rxq == NULL)
282                         return -(ENOMEM);
283
284                 if (nb_queues > old_nb_queues)
285                         memset(rxq + old_nb_queues, 0,
286                                 sizeof(rxq[0]) * (nb_queues - old_nb_queues));
287
288                 dev->data->rx_queues = rxq;
289
290         }
291         dev->data->nb_rx_queues = nb_queues;
292         return (0);
293 }
294
295 static int
296 rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
297 {
298         uint16_t old_nb_queues = dev->data->nb_tx_queues;
299         void **txq;
300         unsigned i;
301
302         if (dev->data->tx_queues == NULL) { /* first time configuration */
303                 dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
304                                 sizeof(dev->data->tx_queues[0]) * nb_queues,
305                                 CACHE_LINE_SIZE);
306                 if (dev->data->tx_queues == NULL) {
307                         dev->data->nb_tx_queues = 0;
308                         return -(ENOMEM);
309                 }
310         } else { /* re-configure */
311                 FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
312
313                 txq = dev->data->tx_queues;
314
315                 for (i = nb_queues; i < old_nb_queues; i++)
316                         (*dev->dev_ops->tx_queue_release)(txq[i]);
317                 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
318                                 CACHE_LINE_SIZE);
319                 if (txq == NULL)
320                         return -(ENOMEM);
321
322                 if (nb_queues > old_nb_queues)
323                         memset(txq + old_nb_queues, 0,
324                                 sizeof(txq[0]) * (nb_queues - old_nb_queues));
325
326                 dev->data->tx_queues = txq;
327
328         }
329         dev->data->nb_tx_queues = nb_queues;
330         return (0);
331 }
332
333 static int
334 rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
335                       const struct rte_eth_conf *dev_conf)
336 {
337         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
338
339         if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
340                 /* check multi-queue mode */
341                 if ((dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) || 
342                     (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) ||
343                     (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB_RSS) ||
344                     (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB)) {
345                         /* SRIOV only works with VMDq enabled */
346                         PMD_DEBUG_TRACE("ethdev port_id=%hhu SRIOV active, "
347                                         "wrong VMDQ mq_mode rx %u tx %u\n", 
348                                         port_id,
349                                         dev_conf->rxmode.mq_mode,
350                                         dev_conf->txmode.mq_mode);
351                         return (-EINVAL);
352                 }
353
354                 switch (dev_conf->rxmode.mq_mode) {
355                 case ETH_MQ_RX_VMDQ_RSS:
356                 case ETH_MQ_RX_VMDQ_DCB:
357                 case ETH_MQ_RX_VMDQ_DCB_RSS:
358                         /* DCB/RSS VMDQ in SRIOV mode is not implemented yet */
359                         PMD_DEBUG_TRACE("ethdev port_id=%hhu SRIOV active, "
360                                         "unsupported VMDQ mq_mode rx %u\n", 
361                                         port_id, dev_conf->rxmode.mq_mode);
362                         return (-EINVAL);
363                 default: /* ETH_MQ_RX_VMDQ_ONLY or ETH_MQ_RX_NONE */
364                         /* if no mq mode was configured, use the default scheme */
365                         dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
366                         if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
367                                 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
368                         break;
369                 }
370
371                 switch (dev_conf->txmode.mq_mode) {
372                 case ETH_MQ_TX_VMDQ_DCB:
373                         /* DCB VMDQ in SRIOV mode is not implemented yet */
374                         PMD_DEBUG_TRACE("ethdev port_id=%hhu SRIOV active, "
375                                         "unsupported VMDQ mq_mode tx %u\n", 
376                                         port_id, dev_conf->txmode.mq_mode);
377                         return (-EINVAL);
378                 default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
379                         /* if no mq mode was configured, use the default scheme */
380                         dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
381                         if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
382                                 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
383                         break;
384                 }
385
386                 /* check that the requested queue numbers are valid */
387                 if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
388                     (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
389                         PMD_DEBUG_TRACE("ethdev port_id=%d SRIOV active, "
390                                     "queue number must be less than or equal to %d\n",
391                                         port_id, RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
392                         return (-EINVAL);
393                 }
394         } else {
395                 /* For VMDq+DCB mode, check our configuration before we go further */
396                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
397                         const struct rte_eth_vmdq_dcb_conf *conf;
398                         
399                         if (nb_rx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
400                                 PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_rx_q "
401                                                 "!= %d\n",
402                                                 port_id, ETH_VMDQ_DCB_NUM_QUEUES);
403                                 return (-EINVAL);
404                         }
405                         conf = &(dev_conf->rx_adv_conf.vmdq_dcb_conf);
406                         if (! (conf->nb_queue_pools == ETH_16_POOLS ||
407                                conf->nb_queue_pools == ETH_32_POOLS)) {
408                                 PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
409                                                 "nb_queue_pools must be %d or %d\n",
410                                                 port_id, ETH_16_POOLS, ETH_32_POOLS);
411                                 return (-EINVAL);
412                         }
413                 }
414                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
415                         const struct rte_eth_vmdq_dcb_tx_conf *conf;
416                         
417                         if (nb_tx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
418                                 PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_tx_q "
419                                                 "!= %d\n",
420                                                 port_id, ETH_VMDQ_DCB_NUM_QUEUES);
421                                 return (-EINVAL);
422                         }
423                         conf = &(dev_conf->tx_adv_conf.vmdq_dcb_tx_conf);
424                         if (! (conf->nb_queue_pools == ETH_16_POOLS ||
425                                conf->nb_queue_pools == ETH_32_POOLS)) {
426                                 PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
427                                                 "nb_queue_pools must be "
428                                                 "%d or %d\n",
429                                                 port_id, ETH_16_POOLS, ETH_32_POOLS);
430                                 return (-EINVAL);
431                         }
432                 }
433                 
434                 /* For DCB mode check our configuration before we go further */
435                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
436                         const struct rte_eth_dcb_rx_conf *conf;
437                         
438                         if (nb_rx_q != ETH_DCB_NUM_QUEUES) {
439                                 PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_rx_q "
440                                                 "!= %d\n",
441                                                 port_id, ETH_DCB_NUM_QUEUES);
442                                 return (-EINVAL);
443                         }
444                         conf = &(dev_conf->rx_adv_conf.dcb_rx_conf);
445                         if (! (conf->nb_tcs == ETH_4_TCS ||
446                                conf->nb_tcs == ETH_8_TCS)) {
447                                 PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
448                                                 "nb_tcs must be "
449                                                 "%d or %d\n",
450                                                 port_id, ETH_4_TCS, ETH_8_TCS);
451                                 return (-EINVAL);
452                         }
453                 }
454                 
455                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
456                         const struct rte_eth_dcb_tx_conf *conf;
457                         
458                         if (nb_tx_q != ETH_DCB_NUM_QUEUES) {
459                                 PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_tx_q "
460                                                 "!= %d\n",
461                                                 port_id, ETH_DCB_NUM_QUEUES);
462                                 return (-EINVAL);
463                         }
464                         conf = &(dev_conf->tx_adv_conf.dcb_tx_conf);
465                         if (! (conf->nb_tcs == ETH_4_TCS ||
466                                conf->nb_tcs == ETH_8_TCS)) {
467                                 PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
468                                                 "nb_tcs must be "
469                                                 "%d or %d\n",
470                                                 port_id, ETH_4_TCS, ETH_8_TCS);
471                                 return (-EINVAL);
472                         }
473                 }
474         }
475         return 0;
476 }
477
478 int
479 rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
480                       const struct rte_eth_conf *dev_conf)
481 {
482         struct rte_eth_dev *dev;
483         struct rte_eth_dev_info dev_info;
484         int diag;
485
486         /* This function is only safe when called from the primary process
487          * in a multi-process setup*/
488         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
489
490         if (port_id >= nb_ports || port_id >= RTE_MAX_ETHPORTS) {
491                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
492                 return (-EINVAL);
493         }
494         dev = &rte_eth_devices[port_id];
495
496         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
497         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
498
499         if (dev->data->dev_started) {
500                 PMD_DEBUG_TRACE(
501                     "port %d must be stopped to allow configuration\n", port_id);
502                 return (-EBUSY);
503         }
504
505         /*
506          * Check that the numbers of RX and TX queues are not greater
507          * than the maximum number of RX and TX queues supported by the
508          * configured device.
509          */
510         (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
511         if (nb_rx_q > dev_info.max_rx_queues) {
512                 PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
513                                 port_id, nb_rx_q, dev_info.max_rx_queues);
514                 return (-EINVAL);
515         }
516         if (nb_rx_q == 0) {
517                 PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_q == 0\n", port_id);
518                 return (-EINVAL);
519         }
520
521         if (nb_tx_q > dev_info.max_tx_queues) {
522                 PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
523                                 port_id, nb_tx_q, dev_info.max_tx_queues);
524                 return (-EINVAL);
525         }
526         if (nb_tx_q == 0) {
527                 PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_q == 0\n", port_id);
528                 return (-EINVAL);
529         }
530
531         /* Copy the dev_conf parameter into the dev structure */
532         memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
533
534         /*
535          * If jumbo frames are enabled, check that the maximum RX packet
536          * length is supported by the configured device.
537          */
538         if (dev_conf->rxmode.jumbo_frame == 1) {
539                 if (dev_conf->rxmode.max_rx_pkt_len >
540                     dev_info.max_rx_pktlen) {
541                         PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
542                                 " > max valid value %u\n",
543                                 port_id,
544                                 (unsigned)dev_conf->rxmode.max_rx_pkt_len,
545                                 (unsigned)dev_info.max_rx_pktlen);
546                         return (-EINVAL);
547                 }
548                 else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
549                         PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
550                                 " < min valid value %u\n",
551                                 port_id,
552                                 (unsigned)dev_conf->rxmode.max_rx_pkt_len,
553                                 (unsigned)ETHER_MIN_LEN);
554                         return (-EINVAL);
555                 }
556         } else
557                 /* Use default value */
558                 dev->data->dev_conf.rxmode.max_rx_pkt_len = ETHER_MAX_LEN;
559
560         /* multiple queue mode checking */
561         diag = rte_eth_dev_check_mq_mode(port_id, nb_rx_q, nb_tx_q, dev_conf);
562         if (diag != 0) {
563                 PMD_DEBUG_TRACE("port%d rte_eth_dev_check_mq_mode = %d\n",
564                                 port_id, diag);
565                 return diag;
566         }
567
568         /*
569          * Setup new number of RX/TX queues and reconfigure device.
570          */
571         diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
572         if (diag != 0) {
573                 PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
574                                 port_id, diag);
575                 return diag;
576         }
577
578         diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
579         if (diag != 0) {
580                 PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
581                                 port_id, diag);
582                 rte_eth_dev_rx_queue_config(dev, 0);
583                 return diag;
584         }
585
586         diag = (*dev->dev_ops->dev_configure)(dev);
587         if (diag != 0) {
588                 PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
589                                 port_id, diag);
590                 rte_eth_dev_rx_queue_config(dev, 0);
591                 rte_eth_dev_tx_queue_config(dev, 0);
592                 return diag;
593         }
594
595         return 0;
596 }
597
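/*
 * Illustrative sketch, not part of the original file: a minimal
 * rte_eth_dev_configure() call enabling RSS, consistent with the checks
 * above.  The queue counts and hash-function mask are just one plausible
 * choice.
 */
static int
example_configure_port(uint8_t port_id)
{
        struct rte_eth_conf conf;

        memset(&conf, 0, sizeof(conf));
        conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
        conf.rx_adv_conf.rss_conf.rss_key = NULL; /* keep the default key */
        conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6;

        /* both queue counts must be non-zero (validated above) */
        return rte_eth_dev_configure(port_id, 4, 4, &conf);
}
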
598 static void
599 rte_eth_dev_config_restore(uint8_t port_id)
600 {
601         struct rte_eth_dev *dev;
602         struct rte_eth_dev_info dev_info;
603         struct ether_addr addr;
604         uint16_t i;
605         uint32_t pool = 0;
606
607         dev = &rte_eth_devices[port_id];
608
609         rte_eth_dev_info_get(port_id, &dev_info);
610
611         if (RTE_ETH_DEV_SRIOV(dev).active)
612                 pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx;
613
614         /* replay MAC address configuration */
615         for (i = 0; i < dev_info.max_mac_addrs; i++) {
616                 addr = dev->data->mac_addrs[i];
617
618                 /* skip zero address */
619                 if (is_zero_ether_addr(&addr))
620                         continue;
621
622                 /* add address to the hardware */
623                 if  (*dev->dev_ops->mac_addr_add)
624                         (*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool);
625                 else {
626                         PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
627                                         port_id);
628                         /* exit the loop but do not return an error */
629                         break;
630                 }
631         }
632
633         /* replay promiscuous configuration */
634         if (rte_eth_promiscuous_get(port_id) == 1)
635                 rte_eth_promiscuous_enable(port_id);
636         else if (rte_eth_promiscuous_get(port_id) == 0)
637                 rte_eth_promiscuous_disable(port_id);
638
639         /* replay allmulticast configuration */
640         if (rte_eth_allmulticast_get(port_id) == 1)
641                 rte_eth_allmulticast_enable(port_id);
642         else if (rte_eth_allmulticast_get(port_id) == 0)
643                 rte_eth_allmulticast_disable(port_id);
644 }
645
646 int
647 rte_eth_dev_start(uint8_t port_id)
648 {
649         struct rte_eth_dev *dev;
650         int diag;
651
652         /* This function is only safe when called from the primary process
653          * in a multi-process setup*/
654         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
655
656         if (port_id >= nb_ports) {
657                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
658                 return (-EINVAL);
659         }
660         dev = &rte_eth_devices[port_id];
661
662         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
663         diag = (*dev->dev_ops->dev_start)(dev);
664         if (diag == 0)
665                 dev->data->dev_started = 1;
666         else
667                 return diag;
668
669         rte_eth_dev_config_restore(port_id);
670
671         return 0;
672 }
673
674 void
675 rte_eth_dev_stop(uint8_t port_id)
676 {
677         struct rte_eth_dev *dev;
678
679         /* This function is only safe when called from the primary process
680          * in a multi-process setup*/
681         PROC_PRIMARY_OR_RET();
682
683         if (port_id >= nb_ports) {
684                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
685                 return;
686         }
687         dev = &rte_eth_devices[port_id];
688
689         FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
690         dev->data->dev_started = 0;
691         (*dev->dev_ops->dev_stop)(dev);
692 }
693
694 void
695 rte_eth_dev_close(uint8_t port_id)
696 {
697         struct rte_eth_dev *dev;
698
699         /* This function is only safe when called from the primary process
700          * in a multi-process setup*/
701         PROC_PRIMARY_OR_RET();
702
703         if (port_id >= nb_ports) {
704                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
705                 return;
706         }
707
708         dev = &rte_eth_devices[port_id];
709
710         FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
711         dev->data->dev_started = 0;
712         (*dev->dev_ops->dev_close)(dev);
713 }
714
715 int
716 rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
717                        uint16_t nb_rx_desc, unsigned int socket_id,
718                        const struct rte_eth_rxconf *rx_conf,
719                        struct rte_mempool *mp)
720 {
721         struct rte_eth_dev *dev;
722         struct rte_pktmbuf_pool_private *mbp_priv;
723         struct rte_eth_dev_info dev_info;
724
725         /* This function is only safe when called from the primary process
726          * in a multi-process setup*/
727         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
728
729         if (port_id >= nb_ports) {
730                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
731                 return (-EINVAL);
732         }
733         dev = &rte_eth_devices[port_id];
734         if (rx_queue_id >= dev->data->nb_rx_queues) {
735                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
736                 return (-EINVAL);
737         }
738
739         if (dev->data->dev_started) {
740                 PMD_DEBUG_TRACE(
741                     "port %d must be stopped to allow configuration\n", port_id);
742                 return -EBUSY;
743         }
744
745         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
746         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
747
748         /*
749          * Check the size of the mbuf data buffer.
750          * This value must be provided in the private data of the memory pool.
751          * First check that the memory pool has valid private data.
752          */
753         (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
754         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
755                 PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
756                                 mp->name, (int) mp->private_data_size,
757                                 (int) sizeof(struct rte_pktmbuf_pool_private));
758                 return (-ENOSPC);
759         }
760         mbp_priv = rte_mempool_get_priv(mp);
761         if ((uint32_t) (mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM) <
762             dev_info.min_rx_bufsize) {
763                 PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
764                                 "(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
765                                 "=%d)\n",
766                                 mp->name,
767                                 (int)mbp_priv->mbuf_data_room_size,
768                                 (int)(RTE_PKTMBUF_HEADROOM +
769                                       dev_info.min_rx_bufsize),
770                                 (int)RTE_PKTMBUF_HEADROOM,
771                                 (int)dev_info.min_rx_bufsize);
772                 return (-EINVAL);
773         }
774
775         return (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
776                                                socket_id, rx_conf, mp);
777 }
778
779 int
780 rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
781                        uint16_t nb_tx_desc, unsigned int socket_id,
782                        const struct rte_eth_txconf *tx_conf)
783 {
784         struct rte_eth_dev *dev;
785
786         /* This function is only safe when called from the primary process
787          * in a multi-process setup*/
788         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
789
790         if (port_id >= RTE_MAX_ETHPORTS || port_id >= nb_ports) {
791                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
792                 return (-EINVAL);
793         }
794         dev = &rte_eth_devices[port_id];
795         if (tx_queue_id >= dev->data->nb_tx_queues) {
796                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
797                 return (-EINVAL);
798         }
799
800         if (dev->data->dev_started) {
801                 PMD_DEBUG_TRACE(
802                     "port %d must be stopped to allow configuration\n", port_id);
803                 return -EBUSY;
804         }
805
806         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
807         return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
808                                                socket_id, tx_conf);
809 }
810
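/*
 * Illustrative sketch, not part of the original file: queue setup followed
 * by device start, in the order the checks above require (queues may only
 * be configured while the port is stopped).  "example_pool" is assumed to
 * be a pre-created mbuf pool, and the zeroed thresholds are placeholders
 * for device-specific values.
 */
static int
example_setup_queues_and_start(uint8_t port_id,
                               struct rte_mempool *example_pool)
{
        struct rte_eth_rxconf rx_conf;
        struct rte_eth_txconf tx_conf;
        int diag;

        memset(&rx_conf, 0, sizeof(rx_conf));
        memset(&tx_conf, 0, sizeof(tx_conf));

        diag = rte_eth_rx_queue_setup(port_id, 0, 128, rte_socket_id(),
                                      &rx_conf, example_pool);
        if (diag != 0)
                return diag;

        diag = rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(),
                                      &tx_conf);
        if (diag != 0)
                return diag;

        /* only a stopped, fully configured port may be started */
        return rte_eth_dev_start(port_id);
}
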
811 void
812 rte_eth_promiscuous_enable(uint8_t port_id)
813 {
814         struct rte_eth_dev *dev;
815
816         if (port_id >= nb_ports) {
817                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
818                 return;
819         }
820         dev = &rte_eth_devices[port_id];
821
822         FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
823         (*dev->dev_ops->promiscuous_enable)(dev);
824         dev->data->promiscuous = 1;
825 }
826
827 void
828 rte_eth_promiscuous_disable(uint8_t port_id)
829 {
830         struct rte_eth_dev *dev;
831
832         if (port_id >= nb_ports) {
833                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
834                 return;
835         }
836         dev = &rte_eth_devices[port_id];
837
838         FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
839         dev->data->promiscuous = 0;
840         (*dev->dev_ops->promiscuous_disable)(dev);
841 }
842
843 int
844 rte_eth_promiscuous_get(uint8_t port_id)
845 {
846         struct rte_eth_dev *dev;
847
848         if (port_id >= nb_ports) {
849                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
850                 return -1;
851         }
852
853         dev = &rte_eth_devices[port_id];
854         return dev->data->promiscuous;
855 }
856
857 void
858 rte_eth_allmulticast_enable(uint8_t port_id)
859 {
860         struct rte_eth_dev *dev;
861
862         if (port_id >= nb_ports) {
863                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
864                 return;
865         }
866         dev = &rte_eth_devices[port_id];
867
868         FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
869         (*dev->dev_ops->allmulticast_enable)(dev);
870         dev->data->all_multicast = 1;
871 }
872
873 void
874 rte_eth_allmulticast_disable(uint8_t port_id)
875 {
876         struct rte_eth_dev *dev;
877
878         if (port_id >= nb_ports) {
879                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
880                 return;
881         }
882         dev = &rte_eth_devices[port_id];
883
884         FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
885         dev->data->all_multicast = 0;
886         (*dev->dev_ops->allmulticast_disable)(dev);
887 }
888
889 int
890 rte_eth_allmulticast_get(uint8_t port_id)
891 {
892         struct rte_eth_dev *dev;
893
894         if (port_id >= nb_ports) {
895                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
896                 return -1;
897         }
898
899         dev = &rte_eth_devices[port_id];
900         return dev->data->all_multicast;
901 }
902
903 static inline int
904 rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
905                                 struct rte_eth_link *link)
906 {
907         struct rte_eth_link *dst = link;
908         struct rte_eth_link *src = &(dev->data->dev_link);
909
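        /*
         * struct rte_eth_link fits in 64 bits; a compare-and-set of the
         * destination against its current value installs an atomic
         * snapshot of the link state.  Failure means the link was updated
         * concurrently (e.g. by the interrupt handler), and -1 is returned.
         */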
910         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
911                                         *(uint64_t *)src) == 0)
912                 return -1;
913
914         return 0;
915 }
916
917 void
918 rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
919 {
920         struct rte_eth_dev *dev;
921
922         if (port_id >= nb_ports) {
923                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
924                 return;
925         }
926         dev = &rte_eth_devices[port_id];
927         FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
928
929         if (dev->data->dev_conf.intr_conf.lsc != 0)
930                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
931         else {
932                 (*dev->dev_ops->link_update)(dev, 1);
933                 *eth_link = dev->data->dev_link;
934         }
935 }
936
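/*
 * Illustrative sketch, not part of the original file: polling the link
 * with rte_eth_link_get() and printing the result.
 */
static void
example_report_link(uint8_t port_id)
{
        struct rte_eth_link link;

        memset(&link, 0, sizeof(link));
        rte_eth_link_get(port_id, &link);
        if (link.link_status)
                printf("port %u up, %u Mbps, %s-duplex\n",
                       (unsigned)port_id, (unsigned)link.link_speed,
                       link.link_duplex == ETH_LINK_FULL_DUPLEX ?
                                "full" : "half");
        else
                printf("port %u down\n", (unsigned)port_id);
}
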
937 void
938 rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
939 {
940         struct rte_eth_dev *dev;
941
942         if (port_id >= nb_ports) {
943                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
944                 return;
945         }
946         dev = &rte_eth_devices[port_id];
947         FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
948
949         if (dev->data->dev_conf.intr_conf.lsc != 0)
950                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
951         else {
952                 (*dev->dev_ops->link_update)(dev, 0);
953                 *eth_link = dev->data->dev_link;
954         }
955 }
956
957 void
958 rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
959 {
960         struct rte_eth_dev *dev;
961
962         if (port_id >= nb_ports) {
963                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
964                 return;
965         }
966         dev = &rte_eth_devices[port_id];
967         memset(stats, 0, sizeof(*stats));
968
969         FUNC_PTR_OR_RET(*dev->dev_ops->stats_get);
970         (*dev->dev_ops->stats_get)(dev, stats);
971         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
972 }
973
974 void
975 rte_eth_stats_reset(uint8_t port_id)
976 {
977         struct rte_eth_dev *dev;
978
979         if (port_id >= nb_ports) {
980                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
981                 return;
982         }
983         dev = &rte_eth_devices[port_id];
984
985         FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
986         (*dev->dev_ops->stats_reset)(dev);
987 }
988
989
990 static int
991 set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
992                 uint8_t is_rx)
993 {
994         struct rte_eth_dev *dev;
995
996         if (port_id >= nb_ports) {
997                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
998                 return -ENODEV;
999         }
1000         dev = &rte_eth_devices[port_id];
1001
1002         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
1003         return (*dev->dev_ops->queue_stats_mapping_set)
1004                         (dev, queue_id, stat_idx, is_rx);
1005 }
1006
1007
1008 int
1009 rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
1010                 uint8_t stat_idx)
1011 {
1012         return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
1013                         STAT_QMAP_TX);
1014 }
1015
1016
1017 int
1018 rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
1019                 uint8_t stat_idx)
1020 {
1021         return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
1022                         STAT_QMAP_RX);
1023 }
1024
1025
1026 void
1027 rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
1028 {
1029         struct rte_eth_dev *dev;
1030
1031         if (port_id >= nb_ports) {
1032                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1033                 return;
1034         }
1035         dev = &rte_eth_devices[port_id];
1036
1037         /* Default device offload capabilities to zero */
1038         dev_info->rx_offload_capa = 0;
1039         dev_info->tx_offload_capa = 0;
1040         dev_info->if_index = 0;
1041         FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
1042         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
1043         dev_info->pci_dev = dev->pci_dev;
1044         if (dev->driver)
1045                 dev_info->driver_name = dev->driver->pci_drv.name;
1046 }
1047
1048 void
1049 rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
1050 {
1051         struct rte_eth_dev *dev;
1052
1053         if (port_id >= nb_ports) {
1054                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1055                 return;
1056         }
1057         dev = &rte_eth_devices[port_id];
1058         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
1059 }
1060
1061 int
1062 rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
1063 {
1064         struct rte_eth_dev *dev;
1065
1066         if (port_id >= nb_ports) {
1067                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1068                 return (-ENODEV);
1069         }
1070         dev = &rte_eth_devices[port_id];
1071         if (! (dev->data->dev_conf.rxmode.hw_vlan_filter)) {
1072                 PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
1073                 return (-ENOSYS);
1074         }
1075
1076         if (vlan_id > 4095) {
1077                 PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
1078                                 port_id, (unsigned) vlan_id);
1079                 return (-EINVAL);
1080         }
1081         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
1082         (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
1083         return (0);
1084 }
1085
1086 int
1087 rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on)
1088 {
1089         struct rte_eth_dev *dev;
1090
1091         if (port_id >= nb_ports) {
1092                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1093                 return (-ENODEV);
1094         }
1095
1096         dev = &rte_eth_devices[port_id];
1097         if (rx_queue_id >= dev->data->nb_rx_queues) {
1098                 PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", rx_queue_id);
1099                 return (-EINVAL);
1100         }
1101
1102         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
1103         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
1104
1105         return (0);
1106 }
1107
1108 int
1109 rte_eth_dev_set_vlan_ether_type(uint8_t port_id, uint16_t tpid)
1110 {
1111         struct rte_eth_dev *dev;
1112
1113         if (port_id >= nb_ports) {
1114                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1115                 return (-ENODEV);
1116         }
1117
1118         dev = &rte_eth_devices[port_id];
1119         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
1120         (*dev->dev_ops->vlan_tpid_set)(dev, tpid);
1121
1122         return (0);
1123 }
1124
1125 int
1126 rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
1127 {
1128         struct rte_eth_dev *dev;
1129         int ret = 0;
1130         int mask = 0;
1131         int cur, org = 0;
1132         
1133         if (port_id >= nb_ports) {
1134                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1135                 return (-ENODEV);
1136         }
1137
1138         dev = &rte_eth_devices[port_id];
1139
1140         /* check which options were changed by the application */
1141         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
1142         org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
1143         if (cur != org){
1144                 dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
1145                 mask |= ETH_VLAN_STRIP_MASK;
1146         }
1147         
1148         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
1149         org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
1150         if (cur != org){
1151                 dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
1152                 mask |= ETH_VLAN_FILTER_MASK;
1153         }
1154
1155         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
1156         org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
1157         if (cur != org){
1158                 dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
1159                 mask |= ETH_VLAN_EXTEND_MASK;
1160         }
1161
1162         /* no change */
1163         if (mask == 0)
1164                 return ret;
1165         
1166         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
1167         (*dev->dev_ops->vlan_offload_set)(dev, mask);
1168
1169         return ret;
1170 }
1171
1172 int
1173 rte_eth_dev_get_vlan_offload(uint8_t port_id)
1174 {
1175         struct rte_eth_dev *dev;
1176         int ret = 0;
1177
1178         if (port_id >= nb_ports) {
1179                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1180                 return (-ENODEV);
1181         }
1182
1183         dev = &rte_eth_devices[port_id];
1184
1185         if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1186                 ret |= ETH_VLAN_STRIP_OFFLOAD ;
1187
1188         if (dev->data->dev_conf.rxmode.hw_vlan_filter)
1189                 ret |= ETH_VLAN_FILTER_OFFLOAD ;
1190
1191         if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1192                 ret |= ETH_VLAN_EXTEND_OFFLOAD ;
1193
1194         return ret;
1195 }
1196
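/*
 * Illustrative sketch, not part of the original file: enabling VLAN
 * stripping without disturbing the other offload bits, using the get/set
 * pair above.
 */
static int
example_enable_vlan_strip(uint8_t port_id)
{
        int mask = rte_eth_dev_get_vlan_offload(port_id);

        if (mask < 0)
                return mask;
        return rte_eth_dev_set_vlan_offload(port_id,
                        mask | ETH_VLAN_STRIP_OFFLOAD);
}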
1197
1198 int
1199 rte_eth_dev_fdir_add_signature_filter(uint8_t port_id,
1200                                       struct rte_fdir_filter *fdir_filter,
1201                                       uint8_t queue)
1202 {
1203         struct rte_eth_dev *dev;
1204
1205         if (port_id >= nb_ports) {
1206                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1207                 return (-ENODEV);
1208         }
1209
1210         dev = &rte_eth_devices[port_id];
1211
1212         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
1213                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1214                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1215                 return (-ENOSYS);
1216         }
1217
1218         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1219              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1220             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1221                 PMD_DEBUG_TRACE("Ports are meaningless for SCTP and " \
1222                                 "NONE l4type; source and destination " \
1223                                 "ports should be null!\n");
1224                 return (-EINVAL);
1225         }
1226
1227         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_signature_filter, -ENOTSUP);
1228         return (*dev->dev_ops->fdir_add_signature_filter)(dev, fdir_filter,
1229                                                                 queue);
1230 }
1231
1232 int
1233 rte_eth_dev_fdir_update_signature_filter(uint8_t port_id,
1234                                          struct rte_fdir_filter *fdir_filter,
1235                                          uint8_t queue)
1236 {
1237         struct rte_eth_dev *dev;
1238
1239         if (port_id >= nb_ports) {
1240                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1241                 return (-ENODEV);
1242         }
1243
1244         dev = &rte_eth_devices[port_id];
1245
1246         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
1247                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1248                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1249                 return (-ENOSYS);
1250         }
1251
1252         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1253              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1254             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1255                 PMD_DEBUG_TRACE("Ports are meaningless for SCTP and " \
1256                                 "NONE l4type; source and destination " \
1257                                 "ports should be null!\n");
1258                 return (-EINVAL);
1259         }
1260
1261         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_signature_filter, -ENOTSUP);
1262         return (*dev->dev_ops->fdir_update_signature_filter)(dev, fdir_filter,
1263                                                                 queue);
1264
1265 }
1266
1267 int
1268 rte_eth_dev_fdir_remove_signature_filter(uint8_t port_id,
1269                                          struct rte_fdir_filter *fdir_filter)
1270 {
1271         struct rte_eth_dev *dev;
1272
1273         if (port_id >= nb_ports) {
1274                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1275                 return (-ENODEV);
1276         }
1277
1278         dev = &rte_eth_devices[port_id];
1279
1280         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
1281                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1282                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1283                 return (-ENOSYS);
1284         }
1285
1286         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1287              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1288             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1289                 PMD_DEBUG_TRACE("Ports are meaningless for SCTP and " \
1290                                 "NONE l4type; source and destination " \
1291                                 "ports should be null!\n");
1292                 return (-EINVAL);
1293         }
1294
1295         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_signature_filter, -ENOTSUP);
1296         return (*dev->dev_ops->fdir_remove_signature_filter)(dev, fdir_filter);
1297 }
1298
1299 int
1300 rte_eth_dev_fdir_get_infos(uint8_t port_id, struct rte_eth_fdir *fdir)
1301 {
1302         struct rte_eth_dev *dev;
1303
1304         if (port_id >= nb_ports) {
1305                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1306                 return (-ENODEV);
1307         }
1308
1309         dev = &rte_eth_devices[port_id];
1310         if (! (dev->data->dev_conf.fdir_conf.mode)) {
1311                 PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
1312                 return (-ENOSYS);
1313         }
1314
1315         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_infos_get, -ENOTSUP);
1316
1317         (*dev->dev_ops->fdir_infos_get)(dev, fdir);
1318         return (0);
1319 }
1320
1321 int
1322 rte_eth_dev_fdir_add_perfect_filter(uint8_t port_id,
1323                                     struct rte_fdir_filter *fdir_filter,
1324                                     uint16_t soft_id, uint8_t queue,
1325                                     uint8_t drop)
1326 {
1327         struct rte_eth_dev *dev;
1328
1329         if (port_id >= nb_ports) {
1330                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1331                 return (-ENODEV);
1332         }
1333
1334         dev = &rte_eth_devices[port_id];
1335
1336         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
1337                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1338                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1339                 return (-ENOSYS);
1340         }
1341
1342         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1343              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1344             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1345                 PMD_DEBUG_TRACE("Ports are meaningless for SCTP and " \
1346                                 "NONE l4type; source and destination " \
1347                                 "ports should be null!\n");
1348                 return (-EINVAL);
1349         }
1350
1351         /* For now IPv6 is not supported with perfect filter */
1352         if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
1353                 return (-ENOTSUP);
1354
1355         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_perfect_filter, -ENOTSUP);
1356         return (*dev->dev_ops->fdir_add_perfect_filter)(dev, fdir_filter,
1357                                                                 soft_id, queue,
1358                                                                 drop);
1359 }
1360
1361 int
1362 rte_eth_dev_fdir_update_perfect_filter(uint8_t port_id,
1363                                        struct rte_fdir_filter *fdir_filter,
1364                                        uint16_t soft_id, uint8_t queue,
1365                                        uint8_t drop)
1366 {
1367         struct rte_eth_dev *dev;
1368
1369         if (port_id >= nb_ports) {
1370                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1371                 return (-ENODEV);
1372         }
1373
1374         dev = &rte_eth_devices[port_id];
1375
1376         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
1377                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1378                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1379                 return (-ENOSYS);
1380         }
1381
1382         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1383              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1384             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1385                 PMD_DEBUG_TRACE("Ports are meaningless for SCTP and " \
1386                                 "NONE l4type; source and destination " \
1387                                 "ports should be null!\n");
1388                 return (-EINVAL);
1389         }
1390
1391         /* For now IPv6 is not supported with perfect filter */
1392         if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
1393                 return (-ENOTSUP);
1394
1395         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_perfect_filter, -ENOTSUP);
1396         return (*dev->dev_ops->fdir_update_perfect_filter)(dev, fdir_filter,
1397                                                         soft_id, queue, drop);
1398 }
1399
1400 int
1401 rte_eth_dev_fdir_remove_perfect_filter(uint8_t port_id,
1402                                        struct rte_fdir_filter *fdir_filter,
1403                                        uint16_t soft_id)
1404 {
1405         struct rte_eth_dev *dev;
1406
1407         if (port_id >= nb_ports) {
1408                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1409                 return (-ENODEV);
1410         }
1411
1412         dev = &rte_eth_devices[port_id];
1413
1414         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
1415                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1416                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1417                 return (-ENOSYS);
1418         }
1419
1420         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1421              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1422             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1423                 PMD_DEBUG_TRACE("Ports are meaningless for SCTP and "
1424                                 "NONE l4type; source and destination "
1425                                 "ports must be 0\n");
1426                 return (-EINVAL);
1427         }
1428
1429         /* For now IPv6 is not supported with perfect filter */
1430         if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
1431                 return (-ENOTSUP);
1432
1433         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_perfect_filter, -ENOTSUP);
1434         return (*dev->dev_ops->fdir_remove_perfect_filter)(dev, fdir_filter,
1435                                                                 soft_id);
1436 }
1437
1438 int
1439 rte_eth_dev_fdir_set_masks(uint8_t port_id, struct rte_fdir_masks *fdir_mask)
1440 {
1441         struct rte_eth_dev *dev;
1442
1443         if (port_id >= nb_ports) {
1444                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1445                 return (-ENODEV);
1446         }
1447
1448         dev = &rte_eth_devices[port_id];
1449         if (!dev->data->dev_conf.fdir_conf.mode) {
1450                 PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
1451                 return (-ENOSYS);
1452         }
1453
1454         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_set_masks, -ENOTSUP);
1455         return (*dev->dev_ops->fdir_set_masks)(dev, fdir_mask);
1456 }
1457
1458 int
1459 rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
1460 {
1461         struct rte_eth_dev *dev;
1462
1463         if (port_id >= nb_ports) {
1464                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1465                 return (-ENODEV);
1466         }
1467
1468         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
1469                 PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
1470                 return (-EINVAL);
1471         }
1472
1473         dev = &rte_eth_devices[port_id];
1474         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
1475         return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
1476 }
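
/*
 * Illustrative usage sketch: enabling full (RX + TX) link flow control.
 * The high_water/low_water/pause_time fields and the RTE_FC_FULL mode
 * value are assumptions about struct rte_eth_fc_conf in rte_ethdev.h;
 * only send_xon is checked by the wrapper above.
 */
static int
example_enable_flow_ctrl(uint8_t port_id)
{
        struct rte_eth_fc_conf fc_conf;

        memset(&fc_conf, 0, sizeof(fc_conf));
        fc_conf.mode = RTE_FC_FULL;   /* pause frames in both directions */
        fc_conf.high_water = 0x80;    /* thresholds are device specific */
        fc_conf.low_water = 0x40;
        fc_conf.pause_time = 0x680;
        fc_conf.send_xon = 1;         /* must be 0 or 1, see check above */

        return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}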
1477
1478 int
1479 rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc_conf)
1480 {
1481         struct rte_eth_dev *dev;
1482
1483         if (port_id >= nb_ports) {
1484                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1485                 return (-ENODEV);
1486         }
1487
1488         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
1489                 PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
1490                 return (-EINVAL);
1491         }
1492
1493         dev = &rte_eth_devices[port_id];
1494         /* High water, low water validation are device specific */
1495         if  (*dev->dev_ops->priority_flow_ctrl_set)
1496                 return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
1497         return (-ENOTSUP);
1498 }
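
/*
 * Illustrative usage sketch: requesting priority flow control for one
 * user priority. Besides the priority field checked above, the embedded
 * fc member (and its RTE_FC_FULL mode value) is an assumption about
 * struct rte_eth_pfc_conf.
 */
static int
example_enable_pfc(uint8_t port_id, uint8_t priority)
{
        struct rte_eth_pfc_conf pfc_conf;

        memset(&pfc_conf, 0, sizeof(pfc_conf));
        pfc_conf.priority = priority; /* 0..ETH_DCB_NUM_USER_PRIORITIES-1 */
        pfc_conf.fc.mode = RTE_FC_FULL;

        return rte_eth_dev_priority_flow_ctrl_set(port_id, &pfc_conf);
}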
1499
1500 int
1501 rte_eth_dev_rss_reta_update(uint8_t port_id, struct rte_eth_rss_reta *reta_conf)
1502 {
1503         struct rte_eth_dev *dev;
1504         uint16_t max_rxq;
1505         uint8_t i,j;
1506
1507         if (port_id >= nb_ports) {
1508                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1509                 return (-ENODEV);
1510         }
1511
1512         /* Invalid mask bit(s) setting */
1513         if ((reta_conf->mask_lo == 0) && (reta_conf->mask_hi == 0)) {
1514                 PMD_DEBUG_TRACE("Invalid update mask bits for port=%d\n", port_id);
1515                 return (-EINVAL);
1516         }
1517
1518         dev = &rte_eth_devices[port_id];
1519         max_rxq = (dev->data->nb_rx_queues <= ETH_RSS_RETA_MAX_QUEUE) ?
1520                 dev->data->nb_rx_queues : ETH_RSS_RETA_MAX_QUEUE;
1521         if (reta_conf->mask_lo != 0) {
1522                 for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES/2; i++) {
1523                         if ((reta_conf->mask_lo & (1ULL << i)) &&
1524                                 (reta_conf->reta[i] >= max_rxq)) {
1525                                 PMD_DEBUG_TRACE("RETA hash index output "
1526                                         "configuration for port=%d, invalid "
1527                                         "queue=%d\n", port_id, reta_conf->reta[i]);
1528
1529                                 return (-EINVAL);
1530                         }
1531                 }
1532         }
1533
1534         if (reta_conf->mask_hi != 0) {
1535                 for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES/2; i++) {
1536                         j = (uint8_t)(i + ETH_RSS_RETA_NUM_ENTRIES/2);
1537
1538                         /* Check if any entry exceeds the last valid queue */
1539                         if ((reta_conf->mask_hi & (1ULL << i)) &&
1540                                 (reta_conf->reta[j] >= max_rxq)) {
1541                                 PMD_DEBUG_TRACE("RETA hash index output "
1542                                         "configuration for port=%d, invalid "
1543                                         "queue=%d\n", port_id, reta_conf->reta[j]);
1544
1545                                 return (-EINVAL);
1546                         }
1547                 }
1548         }
1549
1550         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
1551         return (*dev->dev_ops->reta_update)(dev, reta_conf);
1552 }
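
/*
 * Illustrative usage sketch: spreading all ETH_RSS_RETA_NUM_ENTRIES
 * redirection entries round-robin over the first nb_queues RX queues.
 * mask_lo/mask_hi select which of the (up to 128) entries are written.
 */
static int
example_reta_round_robin(uint8_t port_id, uint16_t nb_queues)
{
        struct rte_eth_rss_reta reta_conf;
        unsigned i;

        if (nb_queues == 0)
                return (-EINVAL);

        memset(&reta_conf, 0, sizeof(reta_conf));
        reta_conf.mask_lo = ~0ULL; /* update entries 0..63 */
        reta_conf.mask_hi = ~0ULL; /* update entries 64..127 */
        for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i++)
                reta_conf.reta[i] = (uint8_t)(i % nb_queues);

        return rte_eth_dev_rss_reta_update(port_id, &reta_conf);
}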
1553
1554 int 
1555 rte_eth_dev_rss_reta_query(uint8_t port_id, struct rte_eth_rss_reta *reta_conf)
1556 {
1557         struct rte_eth_dev *dev;
1558         
1559         if (port_id >= nb_ports) {
1560                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1561                 return (-ENODEV);
1562         }
1563
1564         if ((reta_conf->mask_lo == 0) && (reta_conf->mask_hi == 0)) {
1565                 PMD_DEBUG_TRACE("Invalid update mask bits for port=%d\n", port_id);
1566                 return (-EINVAL);
1567         }
1568
1569         dev = &rte_eth_devices[port_id];
1570         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
1571         return (*dev->dev_ops->reta_query)(dev, reta_conf);
1572 }
1573
1574 int
1575 rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf)
1576 {
1577         struct rte_eth_dev *dev;
1578         uint16_t rss_hash_protos;
1579
1580         if (port_id >= nb_ports) {
1581                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1582                 return (-ENODEV);
1583         }
1584         rss_hash_protos = rss_conf->rss_hf;
1585         if ((rss_hash_protos != 0) &&
1586             ((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) {
1587                 PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%x\n",
1588                                 rss_hash_protos);
1589                 return (-EINVAL);
1590         }
1591         dev = &rte_eth_devices[port_id];
1592         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
1593         return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
1594 }
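
/*
 * Illustrative usage sketch: restricting the RSS hash to IPv4/IPv6
 * traffic while keeping the current hash key. The ETH_RSS_IPV4 and
 * ETH_RSS_IPV6 flags and the "NULL rss_key means keep the configured
 * key" behaviour are assumptions about the PMDs.
 */
static int
example_rss_hash_ip_only(uint8_t port_id)
{
        struct rte_eth_rss_conf rss_conf;

        rss_conf.rss_key = NULL;                       /* do not change the key */
        rss_conf.rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6; /* within ETH_RSS_PROTO_MASK */

        return rte_eth_dev_rss_hash_update(port_id, &rss_conf);
}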
1595
1596 int
1597 rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
1598                               struct rte_eth_rss_conf *rss_conf)
1599 {
1600         struct rte_eth_dev *dev;
1601
1602         if (port_id >= nb_ports) {
1603                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1604                 return (-ENODEV);
1605         }
1606         dev = &rte_eth_devices[port_id];
1607         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
1608         return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
1609 }
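
/*
 * Illustrative usage sketch: reading back the RSS configuration. The
 * caller supplies the buffer behind rss_key; the 40-byte size is an
 * assumption (the usual Toeplitz key length), and a NULL rss_key is
 * assumed to skip key retrieval.
 */
static int
example_rss_hash_show(uint8_t port_id)
{
        struct rte_eth_rss_conf rss_conf;
        uint8_t key[40];
        int ret;

        rss_conf.rss_key = key;
        ret = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
        if (ret == 0)
                printf("port %u: rss_hf=0x%x\n", port_id, rss_conf.rss_hf);
        return ret;
}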
1610
1611 int
1612 rte_eth_led_on(uint8_t port_id)
1613 {
1614         struct rte_eth_dev *dev;
1615
1616         if (port_id >= nb_ports) {
1617                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1618                 return (-ENODEV);
1619         }
1620
1621         dev = &rte_eth_devices[port_id];
1622         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
1623         return ((*dev->dev_ops->dev_led_on)(dev));
1624 }
1625
1626 int
1627 rte_eth_led_off(uint8_t port_id)
1628 {
1629         struct rte_eth_dev *dev;
1630
1631         if (port_id >= nb_ports) {
1632                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1633                 return (-ENODEV);
1634         }
1635
1636         dev = &rte_eth_devices[port_id];
1637         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
1638         return ((*dev->dev_ops->dev_led_off)(dev));
1639 }
1640
1641 /*
1642  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
1643  * an empty spot.
1644  */
1645 static inline int
1646 get_mac_addr_index(uint8_t port_id, struct ether_addr *addr)
1647 {
1648         struct rte_eth_dev_info dev_info;
1649         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1650         unsigned i;
1651
1652         rte_eth_dev_info_get(port_id, &dev_info);
1653
1654         for (i = 0; i < dev_info.max_mac_addrs; i++)
1655                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
1656                         return i;
1657
1658         return -1;
1659 }
1660
1661 static struct ether_addr null_mac_addr = {{0, 0, 0, 0, 0, 0}};
1662
1663 int
1664 rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
1665                         uint32_t pool)
1666 {
1667         struct rte_eth_dev *dev;
1668         int index;
1669         uint64_t pool_mask;
1670
1671         if (port_id >= nb_ports) {
1672                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1673                 return (-ENODEV);
1674         }
1675         dev = &rte_eth_devices[port_id];
1676         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
1677
1678         if (is_zero_ether_addr(addr)) {
1679                 PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n", 
1680                         port_id);
1681                 return (-EINVAL);
1682         }
1683         if (pool >= ETH_64_POOLS) {
1684                 PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
1685                 return (-EINVAL);
1686         }
1687         
1688         index = get_mac_addr_index(port_id, addr);
1689         if (index < 0) {
1690                 index = get_mac_addr_index(port_id, &null_mac_addr);
1691                 if (index < 0) {
1692                         PMD_DEBUG_TRACE("port %d: MAC address array full\n",
1693                                 port_id);
1694                         return (-ENOSPC);
1695                 }
1696         } else {
1697                 pool_mask = dev->data->mac_pool_sel[index];
1698
1699                 /* If both the MAC address and the pool are already there, do nothing */
1700                 if (pool_mask & (1ULL << pool))
1701                         return 0;
1702         }
1703
1704         /* Update NIC */
1705         (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
1706
1707         /* Update address in NIC data structure */
1708         ether_addr_copy(addr, &dev->data->mac_addrs[index]);
1709         
1710         /* Update pool bitmap in NIC data structure */
1711         dev->data->mac_pool_sel[index] |= (1ULL << pool);
1712
1713         return 0;
1714 }
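
/*
 * Illustrative usage sketch: adding a secondary unicast MAC address to
 * pool 0. The address bytes are arbitrary example values (a locally
 * administered address); addr_bytes is the array inside struct ether_addr.
 */
static int
example_add_mac(uint8_t port_id)
{
        struct ether_addr addr = {
                .addr_bytes = {0x02, 0x00, 0x00, 0x00, 0x00, 0x01},
        };

        /* pool must be below ETH_64_POOLS, see the check above */
        return rte_eth_dev_mac_addr_add(port_id, &addr, 0);
}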
1715
1716 int
1717 rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
1718 {
1719         struct rte_eth_dev *dev;
1720         int index;
1721
1722         if (port_id >= nb_ports) {
1723                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1724                 return (-ENODEV);
1725         }
1726         dev = &rte_eth_devices[port_id];
1727         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
1728
1729         index = get_mac_addr_index(port_id, addr);
1730         if (index == 0) {
1731                 PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
1732                 return (-EADDRINUSE);
1733         } else if (index < 0)
1734                 return 0;  /* Do nothing if address wasn't found */
1735
1736         /* Update NIC */
1737         (*dev->dev_ops->mac_addr_remove)(dev, index);
1738
1739         /* Update address in NIC data structure */
1740         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
1741
1742         return 0;
1743 }
1744
1745 int 
1746 rte_eth_dev_set_vf_rxmode(uint8_t port_id,  uint16_t vf,
1747                                 uint16_t rx_mode, uint8_t on)
1748 {
1749         uint16_t num_vfs;
1750         struct rte_eth_dev *dev;
1751         struct rte_eth_dev_info dev_info;
1752
1753         if (port_id >= nb_ports) {
1754                 PMD_DEBUG_TRACE("set VF RX mode:Invalid port_id=%d\n",
1755                                 port_id);
1756                 return (-ENODEV);
1757         }       
1758         
1759         dev = &rte_eth_devices[port_id];
1760         rte_eth_dev_info_get(port_id, &dev_info);
1761
1762         num_vfs = dev_info.max_vfs;
1763         if (vf >= num_vfs)
1764         {
1765                 PMD_DEBUG_TRACE("set VF RX mode:invalid VF id %d\n", vf);
1766                 return (-EINVAL);
1767         }
1768         if (rx_mode == 0)
1769         {
1770                 PMD_DEBUG_TRACE("set VF RX mode:mode mask cannot be zero\n");
1771                 return (-EINVAL);
1772         }
1773         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP);
1774         return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on);
1775 }
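
/*
 * Illustrative usage sketch: letting VF 0 accept broadcast and untagged
 * frames. The ETH_VMDQ_ACCEPT_* flag names are assumptions about the
 * rx_mode bit definitions in rte_ethdev.h.
 */
static int
example_vf_accept_broadcast(uint8_t port_id)
{
        uint16_t rx_mode = ETH_VMDQ_ACCEPT_UNTAG | ETH_VMDQ_ACCEPT_BROADCAST;

        return rte_eth_dev_set_vf_rxmode(port_id, 0, rx_mode, 1);
}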
1776
1777 /*
1778  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
1779  * an empty spot.
1780  */
1781 static inline int
1782 get_hash_mac_addr_index(uint8_t port_id, struct ether_addr *addr)
1783 {
1784         struct rte_eth_dev_info dev_info;
1785         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1786         unsigned i;
1787
1788         rte_eth_dev_info_get(port_id, &dev_info);
1789         if (!dev->data->hash_mac_addrs)
1790                 return -1;
1791
1792         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
1793                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
1794                         ETHER_ADDR_LEN) == 0)
1795                         return i;
1796
1797         return -1;
1798 }
1799
1800 int
1801 rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
1802                                 uint8_t on)
1803 {
1804         int index;
1805         int ret;
1806         struct rte_eth_dev *dev;
1807         
1808         if (port_id >= nb_ports) {
1809                 PMD_DEBUG_TRACE("unicast hash setting:Invalid port_id=%d\n",
1810                         port_id);
1811                 return (-ENODEV);
1812         }
1813         
1814         dev = &rte_eth_devices[port_id];
1815         if (is_zero_ether_addr(addr)) {
1816                 PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n", 
1817                         port_id);
1818                 return (-EINVAL);
1819         }
1820
1821         index = get_hash_mac_addr_index(port_id, addr);
1822         /* Check if it's already there, and do nothing */
1823         if ((index >= 0) && (on))
1824                 return 0;
1825         
1826         if (index < 0) {
1827                 if (!on) {
1828                         PMD_DEBUG_TRACE("port %d: the MAC address was not "
1829                                 "set in UTA\n", port_id);
1830                         return (-EINVAL);
1831                 }
1832                         
1833                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
1834                 if (index < 0) {
1835                         PMD_DEBUG_TRACE("port %d: MAC address array full\n",
1836                                         port_id);
1837                         return (-ENOSPC);
1838                 }
1839         } 
1840          
1841         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
1842         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
1843         if (ret == 0) {
1844                 /* Update address in NIC data structure */
1845                 if (on)
1846                         ether_addr_copy(addr,
1847                                         &dev->data->hash_mac_addrs[index]);
1848                 else 
1849                         ether_addr_copy(&null_mac_addr,
1850                                         &dev->data->hash_mac_addrs[index]);
1851         }
1852         
1853         return ret;
1854 }
1855
1856 int
1857 rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
1858 {
1859         struct rte_eth_dev *dev;
1860         
1861         if (port_id >= nb_ports) {
1862                 PMD_DEBUG_TRACE("unicast hash setting:Invalid port_id=%d\n",
1863                         port_id);
1864                 return (-ENODEV);
1865         }
1866         
1867         dev = &rte_eth_devices[port_id];
1868
1869         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
1870         return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
1871 }
1872
1873 int 
1874 rte_eth_dev_set_vf_rx(uint8_t port_id, uint16_t vf, uint8_t on)
1875 {
1876         uint16_t num_vfs;
1877         struct rte_eth_dev *dev;
1878         struct rte_eth_dev_info dev_info;
1879
1880         if (port_id >= nb_ports) {
1881                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1882                 return (-ENODEV);
1883         }
1884         
1885         dev = &rte_eth_devices[port_id];
1886         rte_eth_dev_info_get(port_id, &dev_info);
1887         
1888         num_vfs = dev_info.max_vfs;
1889         if (vf >= num_vfs)
1890         {
1891                 PMD_DEBUG_TRACE("port %d: invalid vf id\n", port_id);
1892                 return (-EINVAL);
1893         }       
1894         
1895         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP);
1896         return (*dev->dev_ops->set_vf_rx)(dev, vf, on);
1897 }
1898
1899 int 
1900 rte_eth_dev_set_vf_tx(uint8_t port_id, uint16_t vf, uint8_t on)
1901 {
1902         uint16_t num_vfs;
1903         struct rte_eth_dev *dev;
1904         struct rte_eth_dev_info dev_info;
1905
1906         if (port_id >= nb_ports) {
1907                 PMD_DEBUG_TRACE("set pool tx:Invalid port_id=%d\n", port_id);
1908                 return (-ENODEV);
1909         }
1910         
1911         dev = &rte_eth_devices[port_id];
1912         rte_eth_dev_info_get(port_id, &dev_info);
1913
1914         num_vfs = dev_info.max_vfs;
1915         if (vf >= num_vfs)
1916         {
1917                 PMD_DEBUG_TRACE("set pool tx:invalid pool id=%d\n", vf);
1918                 return (-EINVAL);
1919         }
1920         
1921         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP);
1922         return (*dev->dev_ops->set_vf_tx)(dev, vf, on);
1923 }
1924
1925 int
1926 rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id, 
1927                                  uint64_t vf_mask, uint8_t vlan_on)
1928 {
1929         struct rte_eth_dev *dev;
1930
1931         if (port_id >= nb_ports) {
1932                 PMD_DEBUG_TRACE("VF VLAN filter:invalid port id=%d\n",
1933                                 port_id);
1934                 return (-ENODEV);
1935         }
1936         dev = &rte_eth_devices[port_id];
1937
1938         if (vlan_id > ETHER_MAX_VLAN_ID)
1939         {
1940                 PMD_DEBUG_TRACE("VF VLAN filter:invalid VLAN id=%d\n",
1941                         vlan_id);
1942                 return (-EINVAL);
1943         }
1944         if (vf_mask == 0)
1945         {
1946                 PMD_DEBUG_TRACE("VF VLAN filter:pool_mask cannot be 0\n");
1947                 return (-EINVAL);
1948         }
1949         
1950         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP);
1951         return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id,
1952                                                 vf_mask, vlan_on);
1953 }
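
/*
 * Illustrative usage sketch: enabling VLAN 100 for VFs 0 and 1. Each
 * bit of vf_mask selects one VF/pool, mirroring the pool bitmaps used
 * elsewhere in this file.
 */
static int
example_vf_vlan_100(uint8_t port_id)
{
        uint64_t vf_mask = (1ULL << 0) | (1ULL << 1);

        return rte_eth_dev_set_vf_vlan_filter(port_id, 100, vf_mask, 1);
}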
1954
1955 int
1956 rte_eth_mirror_rule_set(uint8_t port_id, 
1957                         struct rte_eth_vmdq_mirror_conf *mirror_conf,
1958                         uint8_t rule_id, uint8_t on)
1959 {
1960         struct rte_eth_dev *dev;
1961
1962         if (port_id >= nb_ports) {
1963                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1964                 return (-ENODEV);
1965         }
1966         
1967         if (mirror_conf->rule_type_mask == 0) {
1968                 PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
1969                 return (-EINVAL);
1970         }
1971         
1972         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
1973                 PMD_DEBUG_TRACE("Invalid dst pool, pool id must "
1974                         "be 0-%d\n", ETH_64_POOLS - 1);
1975                 return (-EINVAL);
1976         }
1977         
1978         if ((mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) &&
1979                 (mirror_conf->pool_mask == 0)) {
1980                 PMD_DEBUG_TRACE("Invalid mirror pool, pool mask cannot "
1981                                 "be 0.\n");
1982                 return (-EINVAL);
1983         }
1984         
1985         if (rule_id >= ETH_VMDQ_NUM_MIRROR_RULE)
1986         {
1987                 PMD_DEBUG_TRACE("Invalid rule_id, rule_id must be 0-%d\n",
1988                         ETH_VMDQ_NUM_MIRROR_RULE - 1);
1989                 return (-EINVAL);
1990         }
1991
1992         dev = &rte_eth_devices[port_id];
1993         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
1994
1995         return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
1996 }
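
/*
 * Illustrative usage sketch: mirroring all traffic of pools 0 and 1
 * into pool 4 using rule 0. Only the fields validated above are set;
 * other match types in rule_type_mask would need their own fields
 * filled in.
 */
static int
example_mirror_pools(uint8_t port_id)
{
        struct rte_eth_vmdq_mirror_conf mirror_conf;

        memset(&mirror_conf, 0, sizeof(mirror_conf));
        mirror_conf.rule_type_mask = ETH_VMDQ_POOL_MIRROR;
        mirror_conf.pool_mask = (1ULL << 0) | (1ULL << 1);
        mirror_conf.dst_pool = 4;

        return rte_eth_mirror_rule_set(port_id, &mirror_conf, 0, 1);
}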
1997
1998 int
1999 rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
2000 {
2001         struct rte_eth_dev *dev;
2002
2003         if (port_id >= nb_ports) {
2004                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2005                 return (-ENODEV);
2006         }
2007
2008         if (rule_id >= ETH_VMDQ_NUM_MIRROR_RULE)
2009         {
2010                 PMD_DEBUG_TRACE("Invalid rule_id, rule_id must be 0-%d\n",
2011                         ETH_VMDQ_NUM_MIRROR_RULE-1);
2012                 return (-EINVAL);
2013         }
2014
2015         dev = &rte_eth_devices[port_id];
2016         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
2017
2018         return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
2019 }
2020
2021 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2022 uint16_t
2023 rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
2024                  struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
2025 {
2026         struct rte_eth_dev *dev;
2027
2028         if (port_id >= nb_ports) {
2029                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2030                 return 0;
2031         }
2032         dev = &rte_eth_devices[port_id];
2033         FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);
2034         if (queue_id >= dev->data->nb_rx_queues) {
2035                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
2036                 return 0;
2037         }
2038         return (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
2039                                                 rx_pkts, nb_pkts);
2040 }
2041
2042 uint16_t
2043 rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
2044                  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2045 {
2046         struct rte_eth_dev *dev;
2047
2048         if (port_id >= nb_ports) {
2049                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2050                 return 0;
2051         }
2052         dev = &rte_eth_devices[port_id];
2053
2054         FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);
2055         if (queue_id >= dev->data->nb_tx_queues) {
2056                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
2057                 return 0;
2058         }
2059         return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id],
2060                                                 tx_pkts, nb_pkts);
2061 }
2062
2063 uint32_t
2064 rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
2065 {
2066         struct rte_eth_dev *dev;
2067
2068         if (port_id >= nb_ports) {
2069                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2070                 return 0;
2071         }
2072         dev = &rte_eth_devices[port_id];
2073         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, 0);
2074         return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
2075 }
2076
2077 int
2078 rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
2079 {
2080         struct rte_eth_dev *dev;
2081
2082         if (port_id >= nb_ports) {
2083                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2084                 return (-ENODEV);
2085         }
2086         dev = &rte_eth_devices[port_id];
2087         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
2088         return (*dev->dev_ops->rx_descriptor_done)(
2089                 dev->data->rx_queues[queue_id], offset);
2090 }
2091 #endif
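
/*
 * Illustrative usage sketch: a minimal polling loop that forwards
 * packets from RX queue 0 back out on TX queue 0 of the same port,
 * freeing whatever could not be sent. Burst size 32 is an arbitrary
 * example value.
 */
static void
example_echo_port(uint8_t port_id)
{
        struct rte_mbuf *pkts[32];
        uint16_t nb_rx, nb_tx, i;

        for (;;) {
                nb_rx = rte_eth_rx_burst(port_id, 0, pkts, 32);
                if (nb_rx == 0)
                        continue;
                nb_tx = rte_eth_tx_burst(port_id, 0, pkts, nb_rx);
                /* drop the packets the TX queue could not accept */
                for (i = nb_tx; i < nb_rx; i++)
                        rte_pktmbuf_free(pkts[i]);
        }
}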
2092
2093 int
2094 rte_eth_dev_callback_register(uint8_t port_id,
2095                         enum rte_eth_event_type event,
2096                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2097 {
2098         struct rte_eth_dev *dev;
2099         struct rte_eth_dev_callback *user_cb;
2100
2101         if (!cb_fn)
2102                 return (-EINVAL);
2103         if (port_id >= nb_ports) {
2104                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2105                 return (-EINVAL);
2106         }
2107
2108         dev = &rte_eth_devices[port_id];
2109         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2110
2111         TAILQ_FOREACH(user_cb, &(dev->callbacks), next) {
2112                 if (user_cb->cb_fn == cb_fn &&
2113                         user_cb->cb_arg == cb_arg &&
2114                         user_cb->event == event) {
2115                         break;
2116                 }
2117         }
2118
2119         /* create a new callback. */
2120         if (user_cb == NULL && (user_cb = rte_zmalloc("INTR_USER_CALLBACK",
2121                         sizeof(struct rte_eth_dev_callback), 0)) != NULL) {
2122                 user_cb->cb_fn = cb_fn;
2123                 user_cb->cb_arg = cb_arg;
2124                 user_cb->event = event;
2125                 TAILQ_INSERT_TAIL(&(dev->callbacks), user_cb, next);
2126         }
2127
2128         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2129         return ((user_cb == NULL) ? -ENOMEM : 0);
2130 }
2131
2132 int
2133 rte_eth_dev_callback_unregister(uint8_t port_id,
2134                         enum rte_eth_event_type event,
2135                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2136 {
2137         int ret;
2138         struct rte_eth_dev *dev;
2139         struct rte_eth_dev_callback *cb, *next;
2140
2141         if (!cb_fn)
2142                 return (-EINVAL);
2143         if (port_id >= nb_ports) {
2144                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2145                 return (-EINVAL);
2146         }
2147
2148         dev = &rte_eth_devices[port_id];
2149         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2150
2151         ret = 0;
2152         for (cb = TAILQ_FIRST(&dev->callbacks); cb != NULL; cb = next) {
2153
2154                 next = TAILQ_NEXT(cb, next);
2155
2156                 if (cb->cb_fn != cb_fn || cb->event != event ||
2157                                 (cb->cb_arg != (void *)-1 &&
2158                                 cb->cb_arg != cb_arg))
2159                         continue;
2160
2161                 /*
2162                  * if this callback is not executing right now,
2163                  * then remove it.
2164                  */
2165                 if (cb->active == 0) {
2166                         TAILQ_REMOVE(&(dev->callbacks), cb, next);
2167                         rte_free(cb);
2168                 } else {
2169                         ret = -EAGAIN;
2170                 }
2171         }
2172
2173         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2174         return (ret);
2175 }
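
/*
 * Illustrative usage sketch: registering a link status change callback.
 * RTE_ETH_EVENT_INTR_LSC is assumed to be the relevant
 * rte_eth_event_type value; the callback signature matches the
 * invocation in _rte_eth_dev_callback_process() below.
 */
static void
example_lsc_cb(uint8_t port_id, enum rte_eth_event_type event, void *cb_arg)
{
        (void)cb_arg;
        printf("port %u: event %d (link status changed)\n", port_id, event);
}

static int
example_register_lsc(uint8_t port_id)
{
        return rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
                                             example_lsc_cb, NULL);
}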
2176
2177 void
2178 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
2179         enum rte_eth_event_type event)
2180 {
2181         struct rte_eth_dev_callback *cb_lst;
2182         struct rte_eth_dev_callback dev_cb;
2183
2184         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2185         TAILQ_FOREACH(cb_lst, &(dev->callbacks), next) {
2186                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
2187                         continue;
2188                 dev_cb = *cb_lst;
2189                 cb_lst->active = 1;
2190                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2191                 dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
2192                                                 dev_cb.cb_arg);
2193                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
2194                 cb_lst->active = 0;
2195         }
2196         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2197 }
2198 #ifdef RTE_NIC_BYPASS
2199 int rte_eth_dev_bypass_init(uint8_t port_id)
2200 {
2201         struct rte_eth_dev *dev;
2202
2203         if (port_id >= nb_ports) {
2204                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2205                 return (-ENODEV);
2206         }
2207
2208         if ((dev = &rte_eth_devices[port_id]) == NULL) {
2209                 PMD_DEBUG_TRACE("Invalid port device\n");
2210                 return (-ENODEV);
2211         }
2212
2213         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP);
2214         (*dev->dev_ops->bypass_init)(dev);
2215         return 0;
2216 }
2217
2218 int
2219 rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state)
2220 {
2221         struct rte_eth_dev *dev;
2222
2223         if (port_id >= nb_ports) {
2224                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2225                 return (-ENODEV);
2226         }
2227
2228         if ((dev = &rte_eth_devices[port_id]) == NULL) {
2229                 PMD_DEBUG_TRACE("Invalid port device\n");
2230                 return (-ENODEV);
2231         }
2232         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
2233         (*dev->dev_ops->bypass_state_show)(dev, state);
2234         return 0;
2235 }
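
/*
 * Illustrative usage sketch: initializing the bypass device and reading
 * back its current state. The meaning of the state word is PMD specific.
 */
static int
example_bypass_query(uint8_t port_id)
{
        uint32_t state = 0;
        int ret;

        ret = rte_eth_dev_bypass_init(port_id);
        if (ret != 0)
                return ret;
        /* on success, state holds the PMD-specific bypass state */
        return rte_eth_dev_bypass_state_show(port_id, &state);
}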
2236
2237 int
2238 rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state)
2239 {
2240         struct rte_eth_dev *dev;
2241
2242         if (port_id >= nb_ports) {
2243                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2244                 return (-ENODEV);
2245         }
2246
2247         if ((dev = &rte_eth_devices[port_id]) == NULL) {
2248                 PMD_DEBUG_TRACE("Invalid port device\n");
2249                 return (-ENODEV);
2250         }
2251
2252         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP);
2253         (*dev->dev_ops->bypass_state_set)(dev, new_state);
2254         return 0;
2255 }
2256
2257 int
2258 rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state)
2259 {
2260         struct rte_eth_dev *dev;
2261
2262         if (port_id >= nb_ports) {
2263                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2264                 return (-ENODEV);
2265         }
2266
2267         if ((dev = &rte_eth_devices[port_id]) == NULL) {
2268                 PMD_DEBUG_TRACE("Invalid port device\n");
2269                 return (-ENODEV);
2270         }
2271
2272         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_show, -ENOTSUP);
2273         (*dev->dev_ops->bypass_event_show)(dev, event, state);
2274         return 0;
2275 }
2276
2277 int
2278 rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state)
2279 {
2280         struct rte_eth_dev *dev;
2281
2282         if (port_id >= nb_ports) {
2283                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2284                 return (-ENODEV);
2285         }
2286
2287         if ((dev = &rte_eth_devices[port_id]) == NULL) {
2288                 PMD_DEBUG_TRACE("Invalid port device\n");
2289                 return (-ENODEV);
2290         }
2291
2292         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP);
2293         (*dev->dev_ops->bypass_event_set)(dev, event, state);
2294         return 0;
2295 }
2296
2297 int
2298 rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout)
2299 {
2300         struct rte_eth_dev *dev;
2301
2302         if (port_id >= nb_ports) {
2303                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2304                 return (-ENODEV);
2305         }
2306
2307         if ((dev = &rte_eth_devices[port_id]) == NULL) {
2308                 PMD_DEBUG_TRACE("Invalid port device\n");
2309                 return (-ENODEV);
2310         }
2311
2312         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP);
2313         (*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout);
2314         return 0;
2315 }
2316
2317 int
2318 rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver)
2319 {
2320         struct rte_eth_dev *dev;
2321
2322         if (port_id >= nb_ports) {
2323                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2324                 return (-ENODEV);
2325         }
2326
2327         if ((dev = &rte_eth_devices[port_id]) == NULL) {
2328                 PMD_DEBUG_TRACE("Invalid port device\n");
2329                 return (-ENODEV);
2330         }
2331
2332         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP);
2333         (*dev->dev_ops->bypass_ver_show)(dev, ver);
2334         return 0;
2335 }
2336
2337 int
2338 rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
2339 {
2340         struct rte_eth_dev *dev;
2341
2342         if (port_id >= nb_ports) {
2343                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2344                 return (-ENODEV);
2345         }
2346
2347         if ((dev = &rte_eth_devices[port_id]) == NULL) {
2348                 PMD_DEBUG_TRACE("Invalid port device\n");
2349                 return (-ENODEV);
2350         }
2351
2352         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP);
2353         (*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout);
2354         return 0;
2355 }
2356
2357 int
2358 rte_eth_dev_bypass_wd_reset(uint8_t port_id)
2359 {
2360         struct rte_eth_dev *dev;
2361
2362         if (port_id >= nb_ports) {
2363                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2364                 return (-ENODEV);
2365         }
2366
2367         if ((dev = &rte_eth_devices[port_id]) == NULL) {
2368                 PMD_DEBUG_TRACE("Invalid port device\n");
2369                 return (-ENODEV);
2370         }
2371
2372         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP);
2373         (*dev->dev_ops->bypass_wd_reset)(dev);
2374         return 0;
2375 }
2376 #endif