update Intel copyright years to 2014
[dpdk.git] / lib / librte_ether / rte_ethdev.c
1 /*-
2  *   BSD LICENSE
3  * 
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  * 
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  * 
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  * 
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/types.h>
35 #include <sys/queue.h>
36 #include <ctype.h>
37 #include <stdio.h>
38 #include <stdlib.h>
39 #include <string.h>
40 #include <stdarg.h>
41 #include <errno.h>
42 #include <stdint.h>
43 #include <inttypes.h>
44
45 #include <rte_byteorder.h>
46 #include <rte_log.h>
47 #include <rte_debug.h>
48 #include <rte_interrupts.h>
49 #include <rte_pci.h>
50 #include <rte_memory.h>
51 #include <rte_memcpy.h>
52 #include <rte_memzone.h>
53 #include <rte_launch.h>
54 #include <rte_tailq.h>
55 #include <rte_eal.h>
56 #include <rte_per_lcore.h>
57 #include <rte_lcore.h>
58 #include <rte_atomic.h>
59 #include <rte_branch_prediction.h>
60 #include <rte_common.h>
61 #include <rte_ring.h>
62 #include <rte_mempool.h>
63 #include <rte_malloc.h>
64 #include <rte_mbuf.h>
65 #include <rte_errno.h>
66 #include <rte_spinlock.h>
67
68 #include "rte_ether.h"
69 #include "rte_ethdev.h"
70
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
/* Debug trace: logs at ERR level, prefixed with the calling function name. */
#define PMD_DEBUG_TRACE(fmt, args...) do {                        \
                RTE_LOG(ERR, PMD, "%s: " fmt, __func__, ## args); \
        } while (0)
#else
/* Compiled out entirely unless RTE_LIBRTE_ETHDEV_DEBUG is defined. */
#define PMD_DEBUG_TRACE(fmt, args...)
#endif

/* Macros restricting the calling function to the primary process instance only */
#define PROC_PRIMARY_OR_ERR_RET(retval) do { \
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
                PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
                return (retval); \
        } \
} while(0)
#define PROC_PRIMARY_OR_RET() do { \
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
                PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
                return; \
        } \
} while(0)

/* Macros to check for invalid (NULL) function pointers in a dev_ops structure */
#define FUNC_PTR_OR_ERR_RET(func, retval) do { \
        if ((func) == NULL) { \
                PMD_DEBUG_TRACE("Function not supported\n"); \
                return (retval); \
        } \
} while(0)
#define FUNC_PTR_OR_RET(func) do { \
        if ((func) == NULL) { \
                PMD_DEBUG_TRACE("Function not supported\n"); \
                return; \
        } \
} while(0)
106
/* Name of the memzone that holds the shared per-port data array. */
static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
/* Per-process table of ethernet devices, indexed by port id. */
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
/* Shared, memzone-backed per-port data; attached lazily on first allocation. */
static struct rte_eth_dev_data *rte_eth_dev_data = NULL;
/* Number of ports allocated so far in this process. */
static uint8_t nb_ports = 0;

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
114
/**
 * The user application callback description.
 *
 * It contains callback address to be registered by user application,
 * the pointer to the parameters for callback, and the event type.
 */
struct rte_eth_dev_callback {
        TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
        rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
        void *cb_arg;                           /**< Parameter for callback */
        enum rte_eth_event_type event;          /**< Interrupt event type */
        uint32_t active;                        /**< Callback is executing */
};

/* Direction selectors for the per-queue statistics mapping helpers. */
enum {
        STAT_QMAP_TX = 0,
        STAT_QMAP_RX
};
133
134 static inline void
135 rte_eth_dev_data_alloc(void)
136 {
137         const unsigned flags = 0;
138         const struct rte_memzone *mz;
139
140         if (rte_eal_process_type() == RTE_PROC_PRIMARY){
141                 mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
142                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
143                                 rte_socket_id(), flags);
144         } else
145                 mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
146         if (mz == NULL)
147                 rte_panic("Cannot allocate memzone for ethernet port data\n");
148
149         rte_eth_dev_data = mz->addr;
150         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
151                 memset(rte_eth_dev_data, 0,
152                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
153 }
154
155 struct rte_eth_dev *
156 rte_eth_dev_allocate(void)
157 {
158         struct rte_eth_dev *eth_dev;
159
160         if (nb_ports == RTE_MAX_ETHPORTS) {
161                 PMD_DEBUG_TRACE("Reached maximum number of ethernet ports\n");
162                 return NULL;
163         }
164
165         if (rte_eth_dev_data == NULL)
166                 rte_eth_dev_data_alloc();
167
168         eth_dev = &rte_eth_devices[nb_ports];
169         eth_dev->data = &rte_eth_dev_data[nb_ports];
170         eth_dev->data->port_id = nb_ports++;
171         return eth_dev;
172 }
173
174 static int
175 rte_eth_dev_init(struct rte_pci_driver *pci_drv,
176                  struct rte_pci_device *pci_dev)
177 {
178         struct eth_driver    *eth_drv;
179         struct rte_eth_dev *eth_dev;
180         int diag;
181
182         eth_drv = (struct eth_driver *)pci_drv;
183
184         eth_dev = rte_eth_dev_allocate();
185         if (eth_dev == NULL)
186                 return -ENOMEM;
187
188         if (rte_eal_process_type() == RTE_PROC_PRIMARY){
189                 eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
190                                   eth_drv->dev_private_size,
191                                   CACHE_LINE_SIZE);
192                 if (eth_dev->data->dev_private == NULL)
193                         rte_panic("Cannot allocate memzone for private port data\n");
194         }
195         eth_dev->pci_dev = pci_dev;
196         eth_dev->driver = eth_drv;
197         eth_dev->data->rx_mbuf_alloc_failed = 0;
198
199         /* init user callbacks */
200         TAILQ_INIT(&(eth_dev->callbacks));
201
202         /*
203          * Set the default maximum frame size.
204          */
205         eth_dev->data->max_frame_size = ETHER_MAX_LEN;
206
207         /* Invoke PMD device initialization function */
208         diag = (*eth_drv->eth_dev_init)(eth_drv, eth_dev);
209         if (diag == 0)
210                 return (0);
211
212         PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%u device_id=0x%x)"
213                         " failed\n", pci_drv->name,
214                         (unsigned) pci_dev->id.vendor_id,
215                         (unsigned) pci_dev->id.device_id);
216         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
217                 rte_free(eth_dev->data->dev_private);
218         nb_ports--;
219         return diag;
220 }
221
/**
 * Register an Ethernet [Poll Mode] driver.
 *
 * Function invoked by the initialization function of an Ethernet driver
 * to simultaneously register itself as a PCI driver and as an Ethernet
 * Poll Mode Driver.
 * Invokes the rte_eal_pci_register() function to register the *pci_drv*
 * structure embedded in the *eth_drv* structure, after having stored the
 * address of the rte_eth_dev_init() function in the *devinit* field of
 * the *pci_drv* structure.
 * During the PCI probing phase, the rte_eth_dev_init() function is
 * invoked for each PCI [Ethernet device] matching the embedded PCI
 * identifiers provided by the driver.
 */
void
rte_eth_driver_register(struct eth_driver *eth_drv)
{
        /* All probed devices of this driver will be set up by
         * rte_eth_dev_init() above. */
        eth_drv->pci_drv.devinit = rte_eth_dev_init;
        rte_eal_pci_register(&eth_drv->pci_drv);
}
242
243 int
244 rte_eth_dev_socket_id(uint8_t port_id)
245 {
246         if (port_id >= nb_ports)
247                 return -1;
248         return rte_eth_devices[port_id].pci_dev->numa_node;
249 }
250
251 uint8_t
252 rte_eth_dev_count(void)
253 {
254         return (nb_ports);
255 }
256
/*
 * Resize the RX queue pointer array of *dev* to hold *nb_queues* entries.
 *
 * On first configuration the array is allocated with rte_zmalloc(); on
 * reconfiguration it is resized with rte_realloc().  When shrinking, the
 * queues beyond the new count are released through the PMD's
 * rx_queue_release callback BEFORE the array is shrunk; when growing,
 * the newly added slots are zeroed.
 *
 * Returns 0 on success, -ENOTSUP if the PMD lacks rx_queue_release, or
 * -ENOMEM on allocation failure.
 */
static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_rx_queues;
        void **rxq;
        unsigned i;

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

        if (dev->data->rx_queues == NULL) {
                /* First-time configuration: allocate a zeroed array. */
                dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
                                sizeof(dev->data->rx_queues[0]) * nb_queues,
                                CACHE_LINE_SIZE);
                if (dev->data->rx_queues == NULL) {
                        dev->data->nb_rx_queues = 0;
                        return -(ENOMEM);
                }
        } else {
                rxq = dev->data->rx_queues;

                /* Release queues that fall beyond the new count before
                 * the realloc may discard their pointers. */
                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);
                rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
                                CACHE_LINE_SIZE);
                if (rxq == NULL)
                        return -(ENOMEM);

                /* Zero the slots added when growing. */
                if (nb_queues > old_nb_queues)
                        memset(rxq + old_nb_queues, 0,
                                sizeof(rxq[0]) * (nb_queues - old_nb_queues));

                dev->data->rx_queues = rxq;

        }
        dev->data->nb_rx_queues = nb_queues;
        return (0);
}
294
/*
 * Resize the TX queue pointer array of *dev* to hold *nb_queues* entries.
 *
 * Mirror of rte_eth_dev_rx_queue_config(): first configuration allocates
 * a zeroed array; reconfiguration releases queues beyond the new count
 * via the PMD's tx_queue_release callback, then reallocates and zeroes
 * any newly added slots.
 *
 * Returns 0 on success, -ENOTSUP if the PMD lacks tx_queue_release, or
 * -ENOMEM on allocation failure.
 */
static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_tx_queues;
        void **txq;
        unsigned i;

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

        if (dev->data->tx_queues == NULL) {
                /* First-time configuration: allocate a zeroed array. */
                dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
                                sizeof(dev->data->tx_queues[0]) * nb_queues,
                                CACHE_LINE_SIZE);
                if (dev->data->tx_queues == NULL) {
                        dev->data->nb_tx_queues = 0;
                        return -(ENOMEM);
                }
        } else {
                txq = dev->data->tx_queues;

                /* Release queues that fall beyond the new count before
                 * the realloc may discard their pointers. */
                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);
                txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
                                CACHE_LINE_SIZE);
                if (txq == NULL)
                        return -(ENOMEM);

                /* Zero the slots added when growing. */
                if (nb_queues > old_nb_queues)
                        memset(txq + old_nb_queues, 0,
                                sizeof(txq[0]) * (nb_queues - old_nb_queues));

                dev->data->tx_queues = txq;

        }
        dev->data->nb_tx_queues = nb_queues;
        return (0);
}
332
/*
 * Validate the requested multi-queue configuration for a port.
 *
 * With SR-IOV active: RSS/DCB RX modes and DCB TX mode are rejected
 * (only VMDq works with SR-IOV here), the unimplemented VMDq+RSS/DCB
 * combinations are rejected, otherwise the mode is forced to
 * ETH_MQ_RX/TX_VMDQ_ONLY with a single queue per pool, and the queue
 * counts are checked against nb_q_per_pool.
 *
 * Without SR-IOV: for VMDq+DCB and plain DCB modes, the RX/TX queue
 * counts must match the fixed values and the pool/TC counts must be one
 * of the supported enum values.
 *
 * Returns 0 when the configuration is acceptable, -EINVAL otherwise.
 * Note: may rewrite dev->data->dev_conf mq_mode fields as a side effect
 * in the SR-IOV path.
 */
static int
rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                      const struct rte_eth_conf *dev_conf)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];

        if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
                /* check multi-queue mode */
                if ((dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ||
                    (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) ||
                    (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB_RSS) ||
                    (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB)) {
                        /* SRIOV only works in VMDq enable mode */
                        PMD_DEBUG_TRACE("ethdev port_id=%d SRIOV active, "
                                        "wrong VMDQ mq_mode rx %d tx %d\n",
                                        port_id, dev_conf->rxmode.mq_mode,
                                        dev_conf->txmode.mq_mode);
                        return (-EINVAL);
                }

                switch (dev_conf->rxmode.mq_mode) {
                case ETH_MQ_RX_VMDQ_RSS:
                case ETH_MQ_RX_VMDQ_DCB:
                case ETH_MQ_RX_VMDQ_DCB_RSS:
                        /* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
                        PMD_DEBUG_TRACE("ethdev port_id=%d SRIOV active, "
                                        "unsupported VMDQ mq_mode rx %d\n",
                                        port_id, dev_conf->rxmode.mq_mode);
                        return (-EINVAL);
                default: /* ETH_MQ_RX_VMDQ_ONLY or ETH_MQ_RX_NONE */
                        /* if no mq mode was configured, use the default scheme */
                        dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
                        if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
                                RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
                        break;
                }

                switch (dev_conf->txmode.mq_mode) {
                case ETH_MQ_TX_VMDQ_DCB:
                        /* DCB VMDQ in SRIOV mode, not implemented yet */
                        PMD_DEBUG_TRACE("ethdev port_id=%d SRIOV active, "
                                        "unsupported VMDQ mq_mode tx %d\n",
                                        port_id, dev_conf->txmode.mq_mode);
                        return (-EINVAL);
                default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
                        /* if no mq mode was configured, use the default scheme */
                        dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
                        if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
                                RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
                        break;
                }

                /* check valid queue number */
                if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
                    (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
                        PMD_DEBUG_TRACE("ethdev port_id=%d SRIOV active, "
                                    "queue number must less equal to %d\n",
                                        port_id, RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
                        return (-EINVAL);
                }
        } else {
                /* For VMDq+DCB mode check our configuration before we go further */
                if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
                        const struct rte_eth_vmdq_dcb_conf *conf;

                        if (nb_rx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_rx_q "
                                                "!= %d\n",
                                                port_id, ETH_VMDQ_DCB_NUM_QUEUES);
                                return (-EINVAL);
                        }
                        conf = &(dev_conf->rx_adv_conf.vmdq_dcb_conf);
                        if (! (conf->nb_queue_pools == ETH_16_POOLS ||
                               conf->nb_queue_pools == ETH_32_POOLS)) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
                                                "nb_queue_pools must be %d or %d\n",
                                                port_id, ETH_16_POOLS, ETH_32_POOLS);
                                return (-EINVAL);
                        }
                }
                if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
                        const struct rte_eth_vmdq_dcb_tx_conf *conf;

                        if (nb_tx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_tx_q "
                                                "!= %d\n",
                                                port_id, ETH_VMDQ_DCB_NUM_QUEUES);
                                return (-EINVAL);
                        }
                        conf = &(dev_conf->tx_adv_conf.vmdq_dcb_tx_conf);
                        if (! (conf->nb_queue_pools == ETH_16_POOLS ||
                               conf->nb_queue_pools == ETH_32_POOLS)) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
                                                "nb_queue_pools != %d or nb_queue_pools "
                                                "!= %d\n",
                                                port_id, ETH_16_POOLS, ETH_32_POOLS);
                                return (-EINVAL);
                        }
                }

                /* For DCB mode check our configuration before we go further */
                if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
                        const struct rte_eth_dcb_rx_conf *conf;

                        if (nb_rx_q != ETH_DCB_NUM_QUEUES) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_rx_q "
                                                "!= %d\n",
                                                port_id, ETH_DCB_NUM_QUEUES);
                                return (-EINVAL);
                        }
                        conf = &(dev_conf->rx_adv_conf.dcb_rx_conf);
                        if (! (conf->nb_tcs == ETH_4_TCS ||
                               conf->nb_tcs == ETH_8_TCS)) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
                                                "nb_tcs != %d or nb_tcs "
                                                "!= %d\n",
                                                port_id, ETH_4_TCS, ETH_8_TCS);
                                return (-EINVAL);
                        }
                }

                if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
                        const struct rte_eth_dcb_tx_conf *conf;

                        if (nb_tx_q != ETH_DCB_NUM_QUEUES) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_tx_q "
                                                "!= %d\n",
                                                port_id, ETH_DCB_NUM_QUEUES);
                                return (-EINVAL);
                        }
                        conf = &(dev_conf->tx_adv_conf.dcb_tx_conf);
                        if (! (conf->nb_tcs == ETH_4_TCS ||
                               conf->nb_tcs == ETH_8_TCS)) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
                                                "nb_tcs != %d or nb_tcs "
                                                "!= %d\n",
                                                port_id, ETH_4_TCS, ETH_8_TCS);
                                return (-EINVAL);
                        }
                }
        }
        return 0;
}
476
/*
 * Configure an Ethernet device: validate port state and queue counts
 * against the PMD-reported limits, copy the configuration, validate
 * jumbo-frame and multi-queue settings, size the RX/TX queue arrays and
 * finally call the PMD's dev_configure hook.
 *
 * On any failure after the queue arrays were touched, the arrays are
 * rolled back to zero queues before returning.  Primary process only;
 * the port must be stopped.  Returns 0 or a negative errno value.
 */
int
rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                      const struct rte_eth_conf *dev_conf)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        int diag;

        /* This function is only safe when called from the primary process
         * in a multi-process setup*/
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        if (port_id >= nb_ports || port_id >= RTE_MAX_ETHPORTS) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-EINVAL);
        }
        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

        if (dev->data->dev_started) {
                PMD_DEBUG_TRACE(
                    "port %d must be stopped to allow configuration\n", port_id);
                return (-EBUSY);
        }

        /*
         * Check that the numbers of RX and TX queues are not greater
         * than the maximum number of RX and TX queues supported by the
         * configured device.
         */
        (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
        if (nb_rx_q > dev_info.max_rx_queues) {
                PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
                                port_id, nb_rx_q, dev_info.max_rx_queues);
                return (-EINVAL);
        }
        if (nb_rx_q == 0) {
                PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_q == 0\n", port_id);
                return (-EINVAL);
        }

        if (nb_tx_q > dev_info.max_tx_queues) {
                PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
                                port_id, nb_tx_q, dev_info.max_tx_queues);
                return (-EINVAL);
        }
        if (nb_tx_q == 0) {
                PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_q == 0\n", port_id);
                return (-EINVAL);
        }

        /* Copy the dev_conf parameter into the dev structure */
        memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

        /*
         * If jumbo frames are enabled, check that the maximum RX packet
         * length is supported by the configured device.
         */
        if (dev_conf->rxmode.jumbo_frame == 1) {
                if (dev_conf->rxmode.max_rx_pkt_len >
                    dev_info.max_rx_pktlen) {
                        PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
                                " > max valid value %u\n",
                                port_id,
                                (unsigned)dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned)dev_info.max_rx_pktlen);
                        return (-EINVAL);
                }
                else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
                        PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
                                " < min valid value %u\n",
                                port_id,
                                (unsigned)dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned)ETHER_MIN_LEN);
                        return (-EINVAL);
                }
        } else
                /* Use default value */
                dev->data->dev_conf.rxmode.max_rx_pkt_len = ETHER_MAX_LEN;

        /* multiple queue mode checking */
        diag = rte_eth_dev_check_mq_mode(port_id, nb_rx_q, nb_tx_q, dev_conf);
        if (diag != 0) {
                PMD_DEBUG_TRACE("port%d rte_eth_dev_check_mq_mode = %d\n",
                                port_id, diag);
                return diag;
        }

        /*
         * Setup new number of RX/TX queues and reconfigure device.
         */
        diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
        if (diag != 0) {
                PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
                                port_id, diag);
                return diag;
        }

        diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
        if (diag != 0) {
                PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
                                port_id, diag);
                /* Roll back the RX queue array on TX failure. */
                rte_eth_dev_rx_queue_config(dev, 0);
                return diag;
        }

        diag = (*dev->dev_ops->dev_configure)(dev);
        if (diag != 0) {
                PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
                                port_id, diag);
                /* Roll back both queue arrays on PMD configure failure. */
                rte_eth_dev_rx_queue_config(dev, 0);
                rte_eth_dev_tx_queue_config(dev, 0);
                return diag;
        }

        return 0;
}
596
597 static void
598 rte_eth_dev_config_restore(uint8_t port_id)
599 {
600         struct rte_eth_dev *dev;
601         struct rte_eth_dev_info dev_info;
602         struct ether_addr addr;
603         uint16_t i;
604         uint32_t pool = 0;
605
606         dev = &rte_eth_devices[port_id];
607
608         rte_eth_dev_info_get(port_id, &dev_info);
609
610         if (RTE_ETH_DEV_SRIOV(dev).active)
611                 pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx;
612
613         /* replay MAC address configuration */
614         for (i = 0; i < dev_info.max_mac_addrs; i++) {
615                 addr = dev->data->mac_addrs[i];
616
617                 /* skip zero address */
618                 if (is_zero_ether_addr(&addr))
619                         continue;
620
621                 /* add address to the hardware */
622                 if  (*dev->dev_ops->mac_addr_add)
623                         (*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool);
624                 else {
625                         PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
626                                         port_id);
627                         /* exit the loop but not return an error */
628                         break;
629                 }
630         }
631
632         /* replay promiscuous configuration */
633         if (rte_eth_promiscuous_get(port_id) == 1)
634                 rte_eth_promiscuous_enable(port_id);
635         else if (rte_eth_promiscuous_get(port_id) == 0)
636                 rte_eth_promiscuous_disable(port_id);
637
638         /* replay allmulticast configuration */
639         if (rte_eth_allmulticast_get(port_id) == 1)
640                 rte_eth_allmulticast_enable(port_id);
641         else if (rte_eth_allmulticast_get(port_id) == 0)
642                 rte_eth_allmulticast_disable(port_id);
643 }
644
645 int
646 rte_eth_dev_start(uint8_t port_id)
647 {
648         struct rte_eth_dev *dev;
649         int diag;
650
651         /* This function is only safe when called from the primary process
652          * in a multi-process setup*/
653         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
654
655         if (port_id >= nb_ports) {
656                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
657                 return (-EINVAL);
658         }
659         dev = &rte_eth_devices[port_id];
660
661         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
662         diag = (*dev->dev_ops->dev_start)(dev);
663         if (diag == 0)
664                 dev->data->dev_started = 1;
665         else
666                 return diag;
667
668         rte_eth_dev_config_restore(port_id);
669
670         return 0;
671 }
672
673 void
674 rte_eth_dev_stop(uint8_t port_id)
675 {
676         struct rte_eth_dev *dev;
677
678         /* This function is only safe when called from the primary process
679          * in a multi-process setup*/
680         PROC_PRIMARY_OR_RET();
681
682         if (port_id >= nb_ports) {
683                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
684                 return;
685         }
686         dev = &rte_eth_devices[port_id];
687
688         FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
689         dev->data->dev_started = 0;
690         (*dev->dev_ops->dev_stop)(dev);
691 }
692
693 void
694 rte_eth_dev_close(uint8_t port_id)
695 {
696         struct rte_eth_dev *dev;
697
698         /* This function is only safe when called from the primary process
699          * in a multi-process setup*/
700         PROC_PRIMARY_OR_RET();
701
702         if (port_id >= nb_ports) {
703                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
704                 return;
705         }
706
707         dev = &rte_eth_devices[port_id];
708
709         FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
710         dev->data->dev_started = 0;
711         (*dev->dev_ops->dev_close)(dev);
712 }
713
/*
 * Configure RX queue *rx_queue_id* of port *port_id* with *nb_rx_desc*
 * descriptors allocated on *socket_id*, drawing mbufs from *mp*.
 *
 * Validates that the port exists and is stopped, that the queue index
 * is in range, and that the mempool's mbuf data room (minus the packet
 * headroom) satisfies the device's minimum RX buffer size, before
 * delegating to the PMD's rx_queue_setup callback.
 *
 * Primary process only.  Returns 0 on success or a negative errno value.
 */
int
rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
                       uint16_t nb_rx_desc, unsigned int socket_id,
                       const struct rte_eth_rxconf *rx_conf,
                       struct rte_mempool *mp)
{
        struct rte_eth_dev *dev;
        struct rte_pktmbuf_pool_private *mbp_priv;
        struct rte_eth_dev_info dev_info;

        /* This function is only safe when called from the primary process
         * in a multi-process setup*/
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-EINVAL);
        }
        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
                return (-EINVAL);
        }

        /* Queues may only be (re)configured on a stopped port. */
        if (dev->data->dev_started) {
                PMD_DEBUG_TRACE(
                    "port %d must be stopped to allow configuration\n", port_id);
                return -EBUSY;
        }

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);

        /*
         * Check the size of the mbuf data buffer.
         * This value must be provided in the private data of the memory pool.
         * First check that the memory pool has a valid private data.
         */
        (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
        if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
                PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
                                mp->name, (int) mp->private_data_size,
                                (int) sizeof(struct rte_pktmbuf_pool_private));
                return (-ENOSPC);
        }
        /* NOTE(review): assumes the pool's private area sits immediately
         * after struct rte_mempool -- matches the mempool layout used
         * here; verify if the layout ever changes. */
        mbp_priv = (struct rte_pktmbuf_pool_private *)
                ((char *)mp + sizeof(struct rte_mempool));
        if ((uint32_t) (mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM) <
            dev_info.min_rx_bufsize) {
                PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
                                "(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
                                "=%d)\n",
                                mp->name,
                                (int)mbp_priv->mbuf_data_room_size,
                                (int)(RTE_PKTMBUF_HEADROOM +
                                      dev_info.min_rx_bufsize),
                                (int)RTE_PKTMBUF_HEADROOM,
                                (int)dev_info.min_rx_bufsize);
                return (-EINVAL);
        }

        return (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
                                               socket_id, rx_conf, mp);
}
778
779 int
780 rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
781                        uint16_t nb_tx_desc, unsigned int socket_id,
782                        const struct rte_eth_txconf *tx_conf)
783 {
784         struct rte_eth_dev *dev;
785
786         /* This function is only safe when called from the primary process
787          * in a multi-process setup*/
788         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
789
790         if (port_id >= RTE_MAX_ETHPORTS || port_id >= nb_ports) {
791                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
792                 return (-EINVAL);
793         }
794         dev = &rte_eth_devices[port_id];
795         if (tx_queue_id >= dev->data->nb_tx_queues) {
796                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
797                 return (-EINVAL);
798         }
799
800         if (dev->data->dev_started) {
801                 PMD_DEBUG_TRACE(
802                     "port %d must be stopped to allow configuration\n", port_id);
803                 return -EBUSY;
804         }
805
806         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
807         return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
808                                                socket_id, tx_conf);
809 }
810
811 void
812 rte_eth_promiscuous_enable(uint8_t port_id)
813 {
814         struct rte_eth_dev *dev;
815
816         if (port_id >= nb_ports) {
817                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
818                 return;
819         }
820         dev = &rte_eth_devices[port_id];
821
822         FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
823         (*dev->dev_ops->promiscuous_enable)(dev);
824         dev->data->promiscuous = 1;
825 }
826
827 void
828 rte_eth_promiscuous_disable(uint8_t port_id)
829 {
830         struct rte_eth_dev *dev;
831
832         if (port_id >= nb_ports) {
833                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
834                 return;
835         }
836         dev = &rte_eth_devices[port_id];
837
838         FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
839         dev->data->promiscuous = 0;
840         (*dev->dev_ops->promiscuous_disable)(dev);
841 }
842
843 int
844 rte_eth_promiscuous_get(uint8_t port_id)
845 {
846         struct rte_eth_dev *dev;
847
848         if (port_id >= nb_ports) {
849                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
850                 return -1;
851         }
852
853         dev = &rte_eth_devices[port_id];
854         return dev->data->promiscuous;
855 }
856
857 void
858 rte_eth_allmulticast_enable(uint8_t port_id)
859 {
860         struct rte_eth_dev *dev;
861
862         if (port_id >= nb_ports) {
863                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
864                 return;
865         }
866         dev = &rte_eth_devices[port_id];
867
868         FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
869         (*dev->dev_ops->allmulticast_enable)(dev);
870         dev->data->all_multicast = 1;
871 }
872
873 void
874 rte_eth_allmulticast_disable(uint8_t port_id)
875 {
876         struct rte_eth_dev *dev;
877
878         if (port_id >= nb_ports) {
879                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
880                 return;
881         }
882         dev = &rte_eth_devices[port_id];
883
884         FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
885         dev->data->all_multicast = 0;
886         (*dev->dev_ops->allmulticast_disable)(dev);
887 }
888
889 int
890 rte_eth_allmulticast_get(uint8_t port_id)
891 {
892         struct rte_eth_dev *dev;
893
894         if (port_id >= nb_ports) {
895                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
896                 return -1;
897         }
898
899         dev = &rte_eth_devices[port_id];
900         return dev->data->all_multicast;
901 }
902
/*
 * Snapshot the device's current link status into *link with a single
 * atomic 64-bit compare-and-set, so a concurrent writer (e.g. a
 * link-state-change interrupt handler) is never observed half-way
 * through an update.
 *
 * Returns 0 on success, -1 if the cmpset failed (the stored value
 * changed between our read of *dst and the swap).
 *
 * NOTE(review): relies on struct rte_eth_link fitting in 64 bits so the
 * uint64_t casts are valid — confirm against the struct definition.
 */
static inline int
rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	/* Copy *src into *dst only if *dst still holds the value we just
	 * read, making the 8-byte copy effectively atomic. */
	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
916
917 void
918 rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
919 {
920         struct rte_eth_dev *dev;
921
922         if (port_id >= nb_ports) {
923                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
924                 return;
925         }
926         dev = &rte_eth_devices[port_id];
927         FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
928
929         if (dev->data->dev_conf.intr_conf.lsc != 0)
930                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
931         else {
932                 (*dev->dev_ops->link_update)(dev, 1);
933                 *eth_link = dev->data->dev_link;
934         }
935 }
936
937 void
938 rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
939 {
940         struct rte_eth_dev *dev;
941
942         if (port_id >= nb_ports) {
943                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
944                 return;
945         }
946         dev = &rte_eth_devices[port_id];
947         FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
948
949         if (dev->data->dev_conf.intr_conf.lsc != 0)
950                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
951         else {
952                 (*dev->dev_ops->link_update)(dev, 0);
953                 *eth_link = dev->data->dev_link;
954         }
955 }
956
957 void
958 rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
959 {
960         struct rte_eth_dev *dev;
961
962         if (port_id >= nb_ports) {
963                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
964                 return;
965         }
966         dev = &rte_eth_devices[port_id];
967         memset(stats, 0, sizeof(*stats));
968
969         FUNC_PTR_OR_RET(*dev->dev_ops->stats_get);
970         (*dev->dev_ops->stats_get)(dev, stats);
971         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
972 }
973
974 void
975 rte_eth_stats_reset(uint8_t port_id)
976 {
977         struct rte_eth_dev *dev;
978
979         if (port_id >= nb_ports) {
980                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
981                 return;
982         }
983         dev = &rte_eth_devices[port_id];
984
985         FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
986         (*dev->dev_ops->stats_reset)(dev);
987 }
988
989
990 static int
991 set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
992                 uint8_t is_rx)
993 {
994         struct rte_eth_dev *dev;
995
996         if (port_id >= nb_ports) {
997                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
998                 return -ENODEV;
999         }
1000         dev = &rte_eth_devices[port_id];
1001
1002         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
1003         return (*dev->dev_ops->queue_stats_mapping_set)
1004                         (dev, queue_id, stat_idx, is_rx);
1005 }
1006
1007
1008 int
1009 rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
1010                 uint8_t stat_idx)
1011 {
1012         return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
1013                         STAT_QMAP_TX);
1014 }
1015
1016
1017 int
1018 rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
1019                 uint8_t stat_idx)
1020 {
1021         return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
1022                         STAT_QMAP_RX);
1023 }
1024
1025
1026 void
1027 rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
1028 {
1029         struct rte_eth_dev *dev;
1030
1031         if (port_id >= nb_ports) {
1032                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1033                 return;
1034         }
1035         dev = &rte_eth_devices[port_id];
1036
1037         FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
1038         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
1039         dev_info->pci_dev = dev->pci_dev;
1040         if (dev->driver)
1041                 dev_info->driver_name = dev->driver->pci_drv.name;
1042 }
1043
1044 void
1045 rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
1046 {
1047         struct rte_eth_dev *dev;
1048
1049         if (port_id >= nb_ports) {
1050                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1051                 return;
1052         }
1053         dev = &rte_eth_devices[port_id];
1054         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
1055 }
1056
1057 int
1058 rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
1059 {
1060         struct rte_eth_dev *dev;
1061
1062         if (port_id >= nb_ports) {
1063                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1064                 return (-ENODEV);
1065         }
1066         dev = &rte_eth_devices[port_id];
1067         if (! (dev->data->dev_conf.rxmode.hw_vlan_filter)) {
1068                 PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
1069                 return (-ENOSYS);
1070         }
1071
1072         if (vlan_id > 4095) {
1073                 PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
1074                                 port_id, (unsigned) vlan_id);
1075                 return (-EINVAL);
1076         }
1077         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
1078         (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
1079         return (0);
1080 }
1081
1082 int
1083 rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on)
1084 {
1085         struct rte_eth_dev *dev;
1086
1087         if (port_id >= nb_ports) {
1088                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1089                 return (-ENODEV);
1090         }
1091
1092         dev = &rte_eth_devices[port_id];
1093         if (rx_queue_id >= dev->data->nb_rx_queues) {
1094                 PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", port_id);
1095                 return (-EINVAL);
1096         }
1097
1098         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
1099         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
1100
1101         return (0);
1102 }
1103
1104 int
1105 rte_eth_dev_set_vlan_ether_type(uint8_t port_id, uint16_t tpid)
1106 {
1107         struct rte_eth_dev *dev;
1108
1109         if (port_id >= nb_ports) {
1110                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1111                 return (-ENODEV);
1112         }
1113
1114         dev = &rte_eth_devices[port_id];
1115         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
1116         (*dev->dev_ops->vlan_tpid_set)(dev, tpid);
1117
1118         return (0);
1119 }
1120
1121 int
1122 rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
1123 {
1124         struct rte_eth_dev *dev;
1125         int ret = 0;
1126         int mask = 0;
1127         int cur, org = 0;
1128         
1129         if (port_id >= nb_ports) {
1130                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1131                 return (-ENODEV);
1132         }
1133
1134         dev = &rte_eth_devices[port_id];
1135
1136         /*check which option changed by application*/
1137         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
1138         org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
1139         if (cur != org){
1140                 dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
1141                 mask |= ETH_VLAN_STRIP_MASK;
1142         }
1143         
1144         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
1145         org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
1146         if (cur != org){
1147                 dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
1148                 mask |= ETH_VLAN_FILTER_MASK;
1149         }
1150
1151         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
1152         org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
1153         if (cur != org){
1154                 dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
1155                 mask |= ETH_VLAN_EXTEND_MASK;
1156         }
1157
1158         /*no change*/
1159         if(mask == 0)
1160                 return ret;
1161         
1162         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
1163         (*dev->dev_ops->vlan_offload_set)(dev, mask);
1164
1165         return ret;
1166 }
1167
1168 int
1169 rte_eth_dev_get_vlan_offload(uint8_t port_id)
1170 {
1171         struct rte_eth_dev *dev;
1172         int ret = 0;
1173
1174         if (port_id >= nb_ports) {
1175                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1176                 return (-ENODEV);
1177         }
1178
1179         dev = &rte_eth_devices[port_id];
1180
1181         if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1182                 ret |= ETH_VLAN_STRIP_OFFLOAD ;
1183
1184         if (dev->data->dev_conf.rxmode.hw_vlan_filter)
1185                 ret |= ETH_VLAN_FILTER_OFFLOAD ;
1186
1187         if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1188                 ret |= ETH_VLAN_EXTEND_OFFLOAD ;
1189
1190         return ret;
1191 }
1192
1193
1194 int
1195 rte_eth_dev_fdir_add_signature_filter(uint8_t port_id,
1196                                       struct rte_fdir_filter *fdir_filter,
1197                                       uint8_t queue)
1198 {
1199         struct rte_eth_dev *dev;
1200
1201         if (port_id >= nb_ports) {
1202                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1203                 return (-ENODEV);
1204         }
1205
1206         dev = &rte_eth_devices[port_id];
1207
1208         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
1209                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1210                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1211                 return (-ENOSYS);
1212         }
1213
1214         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1215              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1216             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1217                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1218                                 "None l4type, source & destinations ports " \
1219                                 "should be null!\n");
1220                 return (-EINVAL);
1221         }
1222
1223         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_signature_filter, -ENOTSUP);
1224         return (*dev->dev_ops->fdir_add_signature_filter)(dev, fdir_filter,
1225                                                                 queue);
1226 }
1227
1228 int
1229 rte_eth_dev_fdir_update_signature_filter(uint8_t port_id,
1230                                          struct rte_fdir_filter *fdir_filter,
1231                                          uint8_t queue)
1232 {
1233         struct rte_eth_dev *dev;
1234
1235         if (port_id >= nb_ports) {
1236                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1237                 return (-ENODEV);
1238         }
1239
1240         dev = &rte_eth_devices[port_id];
1241
1242         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
1243                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1244                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1245                 return (-ENOSYS);
1246         }
1247
1248         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1249              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1250             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1251                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1252                                 "None l4type, source & destinations ports " \
1253                                 "should be null!\n");
1254                 return (-EINVAL);
1255         }
1256
1257         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_signature_filter, -ENOTSUP);
1258         return (*dev->dev_ops->fdir_update_signature_filter)(dev, fdir_filter,
1259                                                                 queue);
1260
1261 }
1262
1263 int
1264 rte_eth_dev_fdir_remove_signature_filter(uint8_t port_id,
1265                                          struct rte_fdir_filter *fdir_filter)
1266 {
1267         struct rte_eth_dev *dev;
1268
1269         if (port_id >= nb_ports) {
1270                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1271                 return (-ENODEV);
1272         }
1273
1274         dev = &rte_eth_devices[port_id];
1275
1276         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
1277                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1278                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1279                 return (-ENOSYS);
1280         }
1281
1282         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1283              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1284             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1285                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1286                                 "None l4type source & destinations ports " \
1287                                 "should be null!\n");
1288                 return (-EINVAL);
1289         }
1290
1291         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_signature_filter, -ENOTSUP);
1292         return (*dev->dev_ops->fdir_remove_signature_filter)(dev, fdir_filter);
1293 }
1294
1295 int
1296 rte_eth_dev_fdir_get_infos(uint8_t port_id, struct rte_eth_fdir *fdir)
1297 {
1298         struct rte_eth_dev *dev;
1299
1300         if (port_id >= nb_ports) {
1301                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1302                 return (-ENODEV);
1303         }
1304
1305         dev = &rte_eth_devices[port_id];
1306         if (! (dev->data->dev_conf.fdir_conf.mode)) {
1307                 PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
1308                 return (-ENOSYS);
1309         }
1310
1311         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_infos_get, -ENOTSUP);
1312
1313         (*dev->dev_ops->fdir_infos_get)(dev, fdir);
1314         return (0);
1315 }
1316
1317 int
1318 rte_eth_dev_fdir_add_perfect_filter(uint8_t port_id,
1319                                     struct rte_fdir_filter *fdir_filter,
1320                                     uint16_t soft_id, uint8_t queue,
1321                                     uint8_t drop)
1322 {
1323         struct rte_eth_dev *dev;
1324
1325         if (port_id >= nb_ports) {
1326                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1327                 return (-ENODEV);
1328         }
1329
1330         dev = &rte_eth_devices[port_id];
1331
1332         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
1333                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1334                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1335                 return (-ENOSYS);
1336         }
1337
1338         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1339              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1340             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1341                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1342                                 "None l4type, source & destinations ports " \
1343                                 "should be null!\n");
1344                 return (-EINVAL);
1345         }
1346
1347         /* For now IPv6 is not supported with perfect filter */
1348         if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
1349                 return (-ENOTSUP);
1350
1351         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_perfect_filter, -ENOTSUP);
1352         return (*dev->dev_ops->fdir_add_perfect_filter)(dev, fdir_filter,
1353                                                                 soft_id, queue,
1354                                                                 drop);
1355 }
1356
1357 int
1358 rte_eth_dev_fdir_update_perfect_filter(uint8_t port_id,
1359                                        struct rte_fdir_filter *fdir_filter,
1360                                        uint16_t soft_id, uint8_t queue,
1361                                        uint8_t drop)
1362 {
1363         struct rte_eth_dev *dev;
1364
1365         if (port_id >= nb_ports) {
1366                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1367                 return (-ENODEV);
1368         }
1369
1370         dev = &rte_eth_devices[port_id];
1371
1372         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
1373                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1374                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1375                 return (-ENOSYS);
1376         }
1377
1378         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1379              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1380             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1381                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1382                                 "None l4type, source & destinations ports " \
1383                                 "should be null!\n");
1384                 return (-EINVAL);
1385         }
1386
1387         /* For now IPv6 is not supported with perfect filter */
1388         if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
1389                 return (-ENOTSUP);
1390
1391         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_perfect_filter, -ENOTSUP);
1392         return (*dev->dev_ops->fdir_update_perfect_filter)(dev, fdir_filter,
1393                                                         soft_id, queue, drop);
1394 }
1395
1396 int
1397 rte_eth_dev_fdir_remove_perfect_filter(uint8_t port_id,
1398                                        struct rte_fdir_filter *fdir_filter,
1399                                        uint16_t soft_id)
1400 {
1401         struct rte_eth_dev *dev;
1402
1403         if (port_id >= nb_ports) {
1404                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1405                 return (-ENODEV);
1406         }
1407
1408         dev = &rte_eth_devices[port_id];
1409
1410         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
1411                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1412                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1413                 return (-ENOSYS);
1414         }
1415
1416         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1417              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1418             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1419                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1420                                 "None l4type, source & destinations ports " \
1421                                 "should be null!\n");
1422                 return (-EINVAL);
1423         }
1424
1425         /* For now IPv6 is not supported with perfect filter */
1426         if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
1427                 return (-ENOTSUP);
1428
1429         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_perfect_filter, -ENOTSUP);
1430         return (*dev->dev_ops->fdir_remove_perfect_filter)(dev, fdir_filter,
1431                                                                 soft_id);
1432 }
1433
1434 int
1435 rte_eth_dev_fdir_set_masks(uint8_t port_id, struct rte_fdir_masks *fdir_mask)
1436 {
1437         struct rte_eth_dev *dev;
1438
1439         if (port_id >= nb_ports) {
1440                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1441                 return (-ENODEV);
1442         }
1443
1444         dev = &rte_eth_devices[port_id];
1445         if (! (dev->data->dev_conf.fdir_conf.mode)) {
1446                 PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
1447                 return (-ENOSYS);
1448         }
1449
1450         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_set_masks, -ENOTSUP);
1451         return (*dev->dev_ops->fdir_set_masks)(dev, fdir_mask);
1452 }
1453
1454 int
1455 rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
1456 {
1457         struct rte_eth_dev *dev;
1458
1459         if (port_id >= nb_ports) {
1460                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1461                 return (-ENODEV);
1462         }
1463
1464         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
1465                 PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
1466                 return (-EINVAL);
1467         }
1468
1469         dev = &rte_eth_devices[port_id];
1470         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
1471         return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
1472 }
1473
1474 int
1475 rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc_conf)
1476 {
1477         struct rte_eth_dev *dev;
1478
1479         if (port_id >= nb_ports) {
1480                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1481                 return (-ENODEV);
1482         }
1483
1484         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
1485                 PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
1486                 return (-EINVAL);
1487         }
1488
1489         dev = &rte_eth_devices[port_id];
1490         /* High water, low water validation are device specific */
1491         if  (*dev->dev_ops->priority_flow_ctrl_set)
1492                 return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
1493         return (-ENOTSUP);
1494 }
1495
/*
 * Update the RSS redirection table (RETA) of a port.
 *
 * reta_conf->mask_lo/mask_hi select which of the ETH_RSS_RETA_NUM_ENTRIES
 * entries to update (one bit per entry, low and high halves).  Every
 * selected entry must name a queue below ETH_RSS_RETA_MAX_QUEUE.
 *
 * Returns 0 on success, -ENODEV for a bad port, -EINVAL for an empty
 * mask or an out-of-range queue, -ENOTSUP if the driver lacks the op.
 */
int
rte_eth_dev_rss_reta_update(uint8_t port_id, struct rte_eth_rss_reta *reta_conf)
{
	struct rte_eth_dev *dev;
	uint8_t i,j;

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-ENODEV);
	}

	/* Invalid mask bit(s) setting */
	if ((reta_conf->mask_lo == 0) && (reta_conf->mask_hi == 0)) {
		PMD_DEBUG_TRACE("Invalid update mask bits for port=%d\n",port_id);
		return (-EINVAL);
	}

	/* Validate the lower half of the table (entries 0..63). */
	if (reta_conf->mask_lo != 0) {
		for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES/2; i++) {
			if ((reta_conf->mask_lo & (1ULL << i)) &&
				(reta_conf->reta[i] >= ETH_RSS_RETA_MAX_QUEUE)) {
				PMD_DEBUG_TRACE("RETA hash index output"
					"configration for port=%d,invalid"
					"queue=%d\n",port_id,reta_conf->reta[i]);

				return (-EINVAL);
			}
		}
	}

	/* Validate the upper half: bit i of mask_hi guards entry
	 * i + ETH_RSS_RETA_NUM_ENTRIES/2. */
	if (reta_conf->mask_hi != 0) {
		for (i = 0; i< ETH_RSS_RETA_NUM_ENTRIES/2; i++) {
			j = (uint8_t)(i + ETH_RSS_RETA_NUM_ENTRIES/2);

			/* Check if the max entry >= 128 */
			if ((reta_conf->mask_hi & (1ULL << i)) &&
				(reta_conf->reta[j] >= ETH_RSS_RETA_MAX_QUEUE)) {
				PMD_DEBUG_TRACE("RETA hash index output"
					"configration for port=%d,invalid"
					"queue=%d\n",port_id,reta_conf->reta[j]);

				return (-EINVAL);
			}
		}
	}

	dev = &rte_eth_devices[port_id];

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
	return (*dev->dev_ops->reta_update)(dev, reta_conf);
}
1547
1548 int 
1549 rte_eth_dev_rss_reta_query(uint8_t port_id, struct rte_eth_rss_reta *reta_conf)
1550 {
1551         struct rte_eth_dev *dev;
1552         
1553         if (port_id >= nb_ports) {
1554                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1555                 return (-ENODEV);
1556         }
1557
1558         if((reta_conf->mask_lo == 0) && (reta_conf->mask_hi == 0)) {
1559                 PMD_DEBUG_TRACE("Invalid update mask bits for the port=%d\n",port_id);
1560                 return (-EINVAL);
1561         }
1562
1563         dev = &rte_eth_devices[port_id];
1564         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
1565         return (*dev->dev_ops->reta_query)(dev, reta_conf);
1566 }
1567
1568 int
1569 rte_eth_led_on(uint8_t port_id)
1570 {
1571         struct rte_eth_dev *dev;
1572
1573         if (port_id >= nb_ports) {
1574                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1575                 return (-ENODEV);
1576         }
1577
1578         dev = &rte_eth_devices[port_id];
1579         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
1580         return ((*dev->dev_ops->dev_led_on)(dev));
1581 }
1582
1583 int
1584 rte_eth_led_off(uint8_t port_id)
1585 {
1586         struct rte_eth_dev *dev;
1587
1588         if (port_id >= nb_ports) {
1589                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1590                 return (-ENODEV);
1591         }
1592
1593         dev = &rte_eth_devices[port_id];
1594         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
1595         return ((*dev->dev_ops->dev_led_off)(dev));
1596 }
1597
1598 /*
1599  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
1600  * an empty spot.
1601  */
1602 static inline int
1603 get_mac_addr_index(uint8_t port_id, struct ether_addr *addr)
1604 {
1605         struct rte_eth_dev_info dev_info;
1606         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1607         unsigned i;
1608
1609         rte_eth_dev_info_get(port_id, &dev_info);
1610
1611         for (i = 0; i < dev_info.max_mac_addrs; i++)
1612                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
1613                         return i;
1614
1615         return -1;
1616 }
1617
1618 static struct ether_addr null_mac_addr = {{0, 0, 0, 0, 0, 0}};
1619
1620 int
1621 rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
1622                         uint32_t pool)
1623 {
1624         struct rte_eth_dev *dev;
1625         int index;
1626         uint64_t pool_mask;
1627
1628         if (port_id >= nb_ports) {
1629                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1630                 return (-ENODEV);
1631         }
1632         dev = &rte_eth_devices[port_id];
1633         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
1634
1635         if (is_zero_ether_addr(addr)) {
1636                 PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n", 
1637                         port_id);
1638                 return (-EINVAL);
1639         }
1640         if (pool >= ETH_64_POOLS) {
1641                 PMD_DEBUG_TRACE("pool id must be 0-%d\n",ETH_64_POOLS - 1);
1642                 return (-EINVAL);
1643         }
1644         
1645         index = get_mac_addr_index(port_id, addr);
1646         if (index < 0) {
1647                 index = get_mac_addr_index(port_id, &null_mac_addr);
1648                 if (index < 0) {
1649                         PMD_DEBUG_TRACE("port %d: MAC address array full\n",
1650                                 port_id);
1651                         return (-ENOSPC);
1652                 }
1653         } else {
1654                 pool_mask = dev->data->mac_pool_sel[index];
1655                 
1656                 /* Check if both MAC address and pool is alread there, and do nothing */
1657                 if (pool_mask & (1ULL << pool))
1658                         return 0;
1659         }
1660
1661         /* Update NIC */
1662         (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
1663
1664         /* Update address in NIC data structure */
1665         ether_addr_copy(addr, &dev->data->mac_addrs[index]);
1666         
1667         /* Update pool bitmap in NIC data structure */
1668         dev->data->mac_pool_sel[index] |= (1ULL << pool);
1669
1670         return 0;
1671 }
1672
1673 int
1674 rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
1675 {
1676         struct rte_eth_dev *dev;
1677         int index;
1678
1679         if (port_id >= nb_ports) {
1680                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1681                 return (-ENODEV);
1682         }
1683         dev = &rte_eth_devices[port_id];
1684         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
1685
1686         index = get_mac_addr_index(port_id, addr);
1687         if (index == 0) {
1688                 PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
1689                 return (-EADDRINUSE);
1690         } else if (index < 0)
1691                 return 0;  /* Do nothing if address wasn't found */
1692
1693         /* Update NIC */
1694         (*dev->dev_ops->mac_addr_remove)(dev, index);
1695
1696         /* Update address in NIC data structure */
1697         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
1698
1699         return 0;
1700 }
1701
1702 int 
1703 rte_eth_dev_set_vf_rxmode(uint8_t port_id,  uint16_t vf,
1704                                 uint16_t rx_mode, uint8_t on)
1705 {
1706         uint16_t num_vfs;
1707         struct rte_eth_dev *dev;
1708         struct rte_eth_dev_info dev_info;
1709
1710         if (port_id >= nb_ports) {
1711                 PMD_DEBUG_TRACE("set VF RX mode:Invalid port_id=%d\n",
1712                                 port_id);
1713                 return (-ENODEV);
1714         }       
1715         
1716         dev = &rte_eth_devices[port_id];
1717         rte_eth_dev_info_get(port_id, &dev_info);
1718
1719         num_vfs = dev_info.max_vfs;
1720         if (vf > num_vfs)
1721         {
1722                 PMD_DEBUG_TRACE("set VF RX mode:invalid VF id %d\n", vf);
1723                 return (-EINVAL);
1724         }
1725         if (rx_mode == 0)
1726         {
1727                 PMD_DEBUG_TRACE("set VF RX mode:mode mask ca not be zero\n");
1728                 return (-EINVAL);       
1729         }
1730         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP);
1731         return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on);
1732 }
1733
1734 /*
1735  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
1736  * an empty spot.
1737  */
1738 static inline int
1739 get_hash_mac_addr_index(uint8_t port_id, struct ether_addr *addr)
1740 {
1741         struct rte_eth_dev_info dev_info;
1742         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1743         unsigned i;
1744
1745         rte_eth_dev_info_get(port_id, &dev_info);
1746         if (!dev->data->hash_mac_addrs)
1747                 return -1;
1748
1749         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
1750                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
1751                         ETHER_ADDR_LEN) == 0)
1752                         return i;
1753
1754         return -1;
1755 }
1756
1757 int
1758 rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
1759                                 uint8_t on)
1760 {
1761         int index;
1762         int ret;
1763         struct rte_eth_dev *dev;
1764         
1765         if (port_id >= nb_ports) {
1766                 PMD_DEBUG_TRACE("unicast hash setting:Invalid port_id=%d\n",
1767                         port_id);
1768                 return (-ENODEV);
1769         }
1770         
1771         dev = &rte_eth_devices[port_id];
1772         if (is_zero_ether_addr(addr)) {
1773                 PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n", 
1774                         port_id);
1775                 return (-EINVAL);
1776         }
1777
1778         index = get_hash_mac_addr_index(port_id, addr);
1779         /* Check if it's already there, and do nothing */
1780         if ((index >= 0) && (on))
1781                 return 0;
1782         
1783         if (index < 0) {
1784                 if (!on) {
1785                         PMD_DEBUG_TRACE("port %d: the MAC address was not" 
1786                                 "set in UTA\n", port_id);
1787                         return (-EINVAL);
1788                 }
1789                         
1790                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
1791                 if (index < 0) {
1792                         PMD_DEBUG_TRACE("port %d: MAC address array full\n",
1793                                         port_id);
1794                         return (-ENOSPC);
1795                 }
1796         } 
1797          
1798         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
1799         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
1800         if (ret == 0) {
1801                 /* Update address in NIC data structure */
1802                 if (on)
1803                         ether_addr_copy(addr,
1804                                         &dev->data->hash_mac_addrs[index]);
1805                 else 
1806                         ether_addr_copy(&null_mac_addr,
1807                                         &dev->data->hash_mac_addrs[index]);
1808         }
1809         
1810         return ret;
1811 }
1812
1813 int
1814 rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
1815 {
1816         struct rte_eth_dev *dev;
1817         
1818         if (port_id >= nb_ports) {
1819                 PMD_DEBUG_TRACE("unicast hash setting:Invalid port_id=%d\n",
1820                         port_id);
1821                 return (-ENODEV);
1822         }
1823         
1824         dev = &rte_eth_devices[port_id];
1825
1826         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
1827         return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
1828 }
1829
1830 int 
1831 rte_eth_dev_set_vf_rx(uint8_t port_id,uint16_t vf, uint8_t on)
1832 {
1833         uint16_t num_vfs;
1834         struct rte_eth_dev *dev;
1835         struct rte_eth_dev_info dev_info;
1836
1837         if (port_id >= nb_ports) {
1838                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1839                 return (-ENODEV);
1840         }
1841         
1842         dev = &rte_eth_devices[port_id];
1843         rte_eth_dev_info_get(port_id, &dev_info);
1844         
1845         num_vfs = dev_info.max_vfs;
1846         if (vf > num_vfs) 
1847         {
1848                 PMD_DEBUG_TRACE("port %d: invalid vf id\n", port_id);
1849                 return (-EINVAL);
1850         }       
1851         
1852         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP);
1853         return (*dev->dev_ops->set_vf_rx)(dev, vf,on);
1854 }
1855
1856 int 
1857 rte_eth_dev_set_vf_tx(uint8_t port_id,uint16_t vf, uint8_t on)
1858 {
1859         uint16_t num_vfs;
1860         struct rte_eth_dev *dev;
1861         struct rte_eth_dev_info dev_info;
1862
1863         if (port_id >= nb_ports) {
1864                 PMD_DEBUG_TRACE("set pool tx:Invalid port_id=%d\n", port_id);
1865                 return (-ENODEV);
1866         }
1867         
1868         dev = &rte_eth_devices[port_id];
1869         rte_eth_dev_info_get(port_id, &dev_info);
1870
1871         num_vfs = dev_info.max_vfs;
1872         if (vf > num_vfs) 
1873         {
1874                 PMD_DEBUG_TRACE("set pool tx:invalid pool id=%d\n", vf);
1875                 return (-EINVAL);
1876         }
1877         
1878         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP);
1879         return (*dev->dev_ops->set_vf_tx)(dev, vf,on);
1880 }
1881
1882 int
1883 rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id, 
1884                                  uint64_t vf_mask,uint8_t vlan_on)
1885 {
1886         struct rte_eth_dev *dev;
1887
1888         if (port_id >= nb_ports) {
1889                 PMD_DEBUG_TRACE("VF VLAN filter:invalid port id=%d\n",
1890                                 port_id);
1891                 return (-ENODEV);
1892         }
1893         dev = &rte_eth_devices[port_id];
1894
1895         if(vlan_id > ETHER_MAX_VLAN_ID)
1896         {
1897                 PMD_DEBUG_TRACE("VF VLAN filter:invalid VLAN id=%d\n",
1898                         vlan_id);
1899                 return (-EINVAL);
1900         }
1901         if (vf_mask == 0)
1902         {
1903                 PMD_DEBUG_TRACE("VF VLAN filter:pool_mask can not be 0\n");
1904                 return (-EINVAL);
1905         }
1906         
1907         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP);
1908         return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id,
1909                                                 vf_mask,vlan_on);
1910 }
1911
1912 int
1913 rte_eth_mirror_rule_set(uint8_t port_id, 
1914                         struct rte_eth_vmdq_mirror_conf *mirror_conf,
1915                         uint8_t rule_id, uint8_t on)
1916 {
1917         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1918
1919         if (port_id >= nb_ports) {
1920                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1921                 return (-ENODEV);
1922         }
1923         
1924         if (mirror_conf->rule_type_mask == 0) {
1925                 PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
1926                 return (-EINVAL);
1927         }
1928         
1929         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
1930                 PMD_DEBUG_TRACE("Invalid dst pool, pool id must"
1931                         "be 0-%d\n",ETH_64_POOLS - 1);
1932                 return (-EINVAL);
1933         }
1934         
1935         if ((mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) && 
1936                 (mirror_conf->pool_mask == 0)) {
1937                 PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not"
1938                                 "be 0.\n");             
1939                 return (-EINVAL);
1940         }
1941         
1942         if(rule_id >= ETH_VMDQ_NUM_MIRROR_RULE)
1943         {
1944                 PMD_DEBUG_TRACE("Invalid rule_id, rule_id must be 0-%d\n",
1945                         ETH_VMDQ_NUM_MIRROR_RULE - 1);
1946                 return (-EINVAL);
1947         }
1948
1949         dev = &rte_eth_devices[port_id];
1950         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
1951
1952         return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
1953 }
1954
1955 int
1956 rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
1957 {
1958         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1959
1960         if (port_id >= nb_ports) {
1961                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1962                 return (-ENODEV);
1963         }
1964
1965         if(rule_id >= ETH_VMDQ_NUM_MIRROR_RULE)
1966         {
1967                 PMD_DEBUG_TRACE("Invalid rule_id, rule_id must be 0-%d\n",
1968                         ETH_VMDQ_NUM_MIRROR_RULE-1);
1969                 return (-EINVAL);
1970         }
1971
1972         dev = &rte_eth_devices[port_id];
1973         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
1974
1975         return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
1976 }
1977
1978 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
1979 uint16_t
1980 rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
1981                  struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1982 {
1983         struct rte_eth_dev *dev;
1984
1985         if (port_id >= nb_ports) {
1986                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1987                 return 0;
1988         }
1989         dev = &rte_eth_devices[port_id];
1990         FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, -ENOTSUP);
1991         if (queue_id >= dev->data->nb_rx_queues) {
1992                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
1993                 return 0;
1994         }
1995         return (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
1996                                                 rx_pkts, nb_pkts);
1997 }
1998
1999 uint16_t
2000 rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
2001                  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2002 {
2003         struct rte_eth_dev *dev;
2004
2005         if (port_id >= nb_ports) {
2006                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2007                 return 0;
2008         }
2009         dev = &rte_eth_devices[port_id];
2010
2011         FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, -ENOTSUP);
2012         if (queue_id >= dev->data->nb_tx_queues) {
2013                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
2014                 return 0;
2015         }
2016         return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id],
2017                                                 tx_pkts, nb_pkts);
2018 }
2019
2020 uint32_t
2021 rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
2022 {
2023         struct rte_eth_dev *dev;
2024
2025         if (port_id >= nb_ports) {
2026                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2027                 return 0;
2028         }
2029         dev = &rte_eth_devices[port_id];
2030         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, -ENOTSUP);
2031         return (*dev->dev_ops->rx_queue_count)(dev, queue_id);  
2032 }
2033
2034 int
2035 rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
2036 {
2037         struct rte_eth_dev *dev;
2038
2039         if (port_id >= nb_ports) {
2040                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2041                 return (-ENODEV);
2042         }
2043         dev = &rte_eth_devices[port_id];
2044         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
2045         return (*dev->dev_ops->rx_descriptor_done)( \
2046                 dev->data->rx_queues[queue_id], offset);
2047 }
2048 #endif
2049
2050 int
2051 rte_eth_dev_callback_register(uint8_t port_id,
2052                         enum rte_eth_event_type event,
2053                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2054 {
2055         struct rte_eth_dev *dev;
2056         struct rte_eth_dev_callback *user_cb;
2057
2058         if (!cb_fn)
2059                 return (-EINVAL);
2060         if (port_id >= nb_ports) {
2061                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2062                 return (-EINVAL);
2063         }
2064
2065         dev = &rte_eth_devices[port_id];
2066         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2067
2068         TAILQ_FOREACH(user_cb, &(dev->callbacks), next) {
2069                 if (user_cb->cb_fn == cb_fn &&
2070                         user_cb->cb_arg == cb_arg &&
2071                         user_cb->event == event) {
2072                         break;
2073                 }
2074         }
2075
2076         /* create a new callback. */
2077         if (user_cb == NULL && (user_cb = rte_zmalloc("INTR_USER_CALLBACK",
2078                         sizeof(struct rte_eth_dev_callback), 0)) != NULL) {
2079                 user_cb->cb_fn = cb_fn;
2080                 user_cb->cb_arg = cb_arg;
2081                 user_cb->event = event;
2082                 TAILQ_INSERT_TAIL(&(dev->callbacks), user_cb, next);
2083         }
2084
2085         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2086         return ((user_cb == NULL) ? -ENOMEM : 0);
2087 }
2088
2089 int
2090 rte_eth_dev_callback_unregister(uint8_t port_id,
2091                         enum rte_eth_event_type event,
2092                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2093 {
2094         int ret;
2095         struct rte_eth_dev *dev;
2096         struct rte_eth_dev_callback *cb, *next;
2097
2098         if (!cb_fn)
2099                 return (-EINVAL);
2100         if (port_id >= nb_ports) {
2101                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2102                 return (-EINVAL);
2103         }
2104
2105         dev = &rte_eth_devices[port_id];
2106         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2107
2108         ret = 0;
2109         for (cb = TAILQ_FIRST(&dev->callbacks); cb != NULL; cb = next) {
2110
2111                 next = TAILQ_NEXT(cb, next);
2112
2113                 if (cb->cb_fn != cb_fn || cb->event != event ||
2114                                 (cb->cb_arg != (void *)-1 &&
2115                                 cb->cb_arg != cb_arg))
2116                         continue;
2117
2118                 /*
2119                  * if this callback is not executing right now,
2120                  * then remove it.
2121                  */
2122                 if (cb->active == 0) {
2123                         TAILQ_REMOVE(&(dev->callbacks), cb, next);
2124                         rte_free(cb);
2125                 } else {
2126                         ret = -EAGAIN;
2127                 }
2128         }
2129
2130         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2131         return (ret);
2132 }
2133
/*
 * Invoke every callback registered on 'dev' for 'event'.
 *
 * Each callback is run on a private copy of its list entry with the
 * list lock released, so a callback may itself (un)register callbacks.
 * The 'active' flag is set while the callback runs so that
 * rte_eth_dev_callback_unregister() refuses (-EAGAIN) to free an entry
 * that is currently executing.
 */
void
_rte_eth_dev_callback_process(struct rte_eth_dev *dev,
	enum rte_eth_event_type event)
{
	struct rte_eth_dev_callback *cb_lst;
	struct rte_eth_dev_callback dev_cb;

	rte_spinlock_lock(&rte_eth_dev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->callbacks), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		/* Copy the entry, mark it in-use, then drop the lock for
		 * the duration of the user callback. */
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		rte_spinlock_unlock(&rte_eth_dev_cb_lock);
		dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
						dev_cb.cb_arg);
		rte_spinlock_lock(&rte_eth_dev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
}
2155 #ifdef RTE_NIC_BYPASS
2156 int rte_eth_dev_bypass_init(uint8_t port_id)
2157 {
2158         struct rte_eth_dev *dev;
2159
2160         if (port_id >= nb_ports) {
2161                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2162                 return (-ENODEV);
2163         }
2164
2165         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2166                 PMD_DEBUG_TRACE("Invalid port device\n");
2167                 return (-ENODEV);
2168         }
2169
2170         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP);
2171         (*dev->dev_ops->bypass_init)(dev);
2172         return 0;
2173 }
2174
2175 int
2176 rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state)
2177 {
2178         struct rte_eth_dev *dev;
2179
2180         if (port_id >= nb_ports) {
2181                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2182                 return (-ENODEV);
2183         }
2184
2185         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2186                 PMD_DEBUG_TRACE("Invalid port device\n");
2187                 return (-ENODEV);
2188         }
2189         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
2190         (*dev->dev_ops->bypass_state_show)(dev, state);
2191         return 0;
2192 }
2193
2194 int
2195 rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state)
2196 {
2197         struct rte_eth_dev *dev;
2198
2199         if (port_id >= nb_ports) {
2200                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2201                 return (-ENODEV);
2202         }
2203
2204         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2205                 PMD_DEBUG_TRACE("Invalid port device\n");
2206                 return (-ENODEV);
2207         }
2208
2209         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP);
2210         (*dev->dev_ops->bypass_state_set)(dev, new_state);
2211         return 0;
2212 }
2213
2214 int
2215 rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state)
2216 {
2217         struct rte_eth_dev *dev;
2218
2219         if (port_id >= nb_ports) {
2220                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2221                 return (-ENODEV);
2222         }
2223
2224         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2225                 PMD_DEBUG_TRACE("Invalid port device\n");
2226                 return (-ENODEV);
2227         }
2228
2229         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
2230         (*dev->dev_ops->bypass_event_show)(dev, event, state);
2231         return 0;
2232 }
2233
2234 int
2235 rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state)
2236 {
2237         struct rte_eth_dev *dev;
2238
2239         if (port_id >= nb_ports) {
2240                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2241                 return (-ENODEV);
2242         }
2243
2244         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2245                 PMD_DEBUG_TRACE("Invalid port device\n");
2246                 return (-ENODEV);
2247         }
2248
2249         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP);
2250         (*dev->dev_ops->bypass_event_set)(dev, event, state);
2251         return 0;
2252 }
2253
2254 int
2255 rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout)
2256 {
2257         struct rte_eth_dev *dev;
2258
2259         if (port_id >= nb_ports) {
2260                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2261                 return (-ENODEV);
2262         }
2263
2264         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2265                 PMD_DEBUG_TRACE("Invalid port device\n");
2266                 return (-ENODEV);
2267         }
2268
2269         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP);
2270         (*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout);
2271         return 0;
2272 }
2273
2274 int
2275 rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver)
2276 {
2277         struct rte_eth_dev *dev;
2278
2279         if (port_id >= nb_ports) {
2280                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2281                 return (-ENODEV);
2282         }
2283
2284         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2285                 PMD_DEBUG_TRACE("Invalid port device\n");
2286                 return (-ENODEV);
2287         }
2288
2289         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP);
2290         (*dev->dev_ops->bypass_ver_show)(dev, ver);
2291         return 0;
2292 }
2293
2294 int
2295 rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
2296 {
2297         struct rte_eth_dev *dev;
2298
2299         if (port_id >= nb_ports) {
2300                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2301                 return (-ENODEV);
2302         }
2303
2304         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2305                 PMD_DEBUG_TRACE("Invalid port device\n");
2306                 return (-ENODEV);
2307         }
2308
2309         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP);
2310         (*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout);
2311         return 0;
2312 }
2313
2314 int
2315 rte_eth_dev_bypass_wd_reset(uint8_t port_id)
2316 {
2317         struct rte_eth_dev *dev;
2318
2319         if (port_id >= nb_ports) {
2320                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2321                 return (-ENODEV);
2322         }
2323
2324         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2325                 PMD_DEBUG_TRACE("Invalid port device\n");
2326                 return (-ENODEV);
2327         }
2328
2329         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP);
2330         (*dev->dev_ops->bypass_wd_reset)(dev);
2331         return 0;
2332 }
2333 #endif