ethdev: set VMDq pool when adding mac address
[dpdk.git] / lib / librte_ether / rte_ethdev.c
1 /*-
2  *   BSD LICENSE
3  * 
4  *   Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  * 
7  *   Redistribution and use in source and binary forms, with or without 
8  *   modification, are permitted provided that the following conditions 
9  *   are met:
10  * 
11  *     * Redistributions of source code must retain the above copyright 
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright 
14  *       notice, this list of conditions and the following disclaimer in 
15  *       the documentation and/or other materials provided with the 
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its 
18  *       contributors may be used to endorse or promote products derived 
19  *       from this software without specific prior written permission.
20  * 
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  * 
33  */
34
35 #include <sys/types.h>
36 #include <sys/queue.h>
37 #include <ctype.h>
38 #include <stdio.h>
39 #include <stdlib.h>
40 #include <string.h>
41 #include <stdarg.h>
42 #include <errno.h>
43 #include <stdint.h>
44 #include <inttypes.h>
45
46 #include <rte_byteorder.h>
47 #include <rte_log.h>
48 #include <rte_debug.h>
49 #include <rte_interrupts.h>
50 #include <rte_pci.h>
51 #include <rte_memory.h>
52 #include <rte_memcpy.h>
53 #include <rte_memzone.h>
54 #include <rte_launch.h>
55 #include <rte_tailq.h>
56 #include <rte_eal.h>
57 #include <rte_per_lcore.h>
58 #include <rte_lcore.h>
59 #include <rte_atomic.h>
60 #include <rte_branch_prediction.h>
61 #include <rte_common.h>
62 #include <rte_ring.h>
63 #include <rte_mempool.h>
64 #include <rte_malloc.h>
65 #include <rte_mbuf.h>
66 #include <rte_errno.h>
67 #include <rte_spinlock.h>
68
69 #include "rte_ether.h"
70 #include "rte_ethdev.h"
71
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
/* Debug trace: prefix every message with the calling function's name. */
#define PMD_DEBUG_TRACE(fmt, args...) do {                        \
		RTE_LOG(ERR, PMD, "%s: " fmt, __func__, ## args); \
	} while (0)
#else
/* Compiled out entirely when ethdev debugging is disabled. */
#define PMD_DEBUG_TRACE(fmt, args...)
#endif

/* Macros for checking for restricting functions to primary instance only */
#define PROC_PRIMARY_OR_ERR_RET(retval) do { \
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
		PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
		return (retval); \
	} \
} while(0)
#define PROC_PRIMARY_OR_RET() do { \
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
		PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
		return; \
	} \
} while(0)

/* Macros to check for invalid function pointers in dev_ops structure */
#define FUNC_PTR_OR_ERR_RET(func, retval) do { \
	if ((func) == NULL) { \
		PMD_DEBUG_TRACE("Function not supported\n"); \
		return (retval); \
	} \
} while(0)
#define FUNC_PTR_OR_RET(func) do { \
	if ((func) == NULL) { \
		PMD_DEBUG_TRACE("Function not supported\n"); \
		return; \
	} \
} while(0)
107
/* Name of the memzone backing the shared per-port data array. */
static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
/* Per-process device table; the ->data fields point into shared memory. */
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
/* Shared array of per-port data, lazily mapped by rte_eth_dev_data_alloc(). */
static struct rte_eth_dev_data *rte_eth_dev_data = NULL;
/* Number of ports allocated so far in this process. */
static uint8_t nb_ports = 0;

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
115
/**
 * The user application callback description.
 *
 * It contains callback address to be registered by user application,
 * the pointer to the parameters for callback, and the event type.
 */
struct rte_eth_dev_callback {
	TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
	rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
	void *cb_arg;                           /**< Parameter for callback */
	enum rte_eth_event_type event;          /**< Interrupt event type */
	uint32_t active;                        /**< Callback is executing */
};

/* RX/TX direction tags; presumably used when mapping per-queue stats
 * counters — usage is not visible in this chunk, confirm at call sites. */
enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};
134
135 static inline void
136 rte_eth_dev_data_alloc(void)
137 {
138         const unsigned flags = 0;
139         const struct rte_memzone *mz;
140
141         if (rte_eal_process_type() == RTE_PROC_PRIMARY){
142                 mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
143                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
144                                 rte_socket_id(), flags);
145         } else
146                 mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
147         if (mz == NULL)
148                 rte_panic("Cannot allocate memzone for ethernet port data\n");
149
150         rte_eth_dev_data = mz->addr;
151         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
152                 memset(rte_eth_dev_data, 0,
153                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
154 }
155
156 static inline struct rte_eth_dev *
157 rte_eth_dev_allocate(void)
158 {
159         struct rte_eth_dev *eth_dev;
160
161         if (nb_ports == RTE_MAX_ETHPORTS) {
162                 PMD_DEBUG_TRACE("Reached maximum number of ethernet ports\n");
163                 return NULL;
164         }
165
166         if (rte_eth_dev_data == NULL)
167                 rte_eth_dev_data_alloc();
168
169         eth_dev = &rte_eth_devices[nb_ports];
170         eth_dev->data = &rte_eth_dev_data[nb_ports];
171         eth_dev->data->port_id = nb_ports++;
172         return eth_dev;
173 }
174
/*
 * PCI probe callback: allocate and initialize an ethdev for a matching
 * PCI device.  Installed as pci_drv->devinit by rte_eth_driver_register().
 * Returns 0 on success, -ENOMEM if no port slot is free, or the PMD's
 * negative init result (after rolling back the allocation).
 */
static int
rte_eth_dev_init(struct rte_pci_driver *pci_drv,
		 struct rte_pci_device *pci_dev)
{
	struct eth_driver    *eth_drv;
	struct rte_eth_dev *eth_dev;
	int diag;

	/* NOTE(review): this cast assumes pci_drv is the first member of
	 * struct eth_driver — confirm against rte_ethdev.h. */
	eth_drv = (struct eth_driver *)pci_drv;

	eth_dev = rte_eth_dev_allocate();
	if (eth_dev == NULL)
		return -ENOMEM;

	/* Only the primary process allocates the PMD-private data block. */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY){
		eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
				  eth_drv->dev_private_size,
				  CACHE_LINE_SIZE);
		if (eth_dev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private port data\n");
	}
	eth_dev->pci_dev = pci_dev;
	eth_dev->driver = eth_drv;
	eth_dev->data->rx_mbuf_alloc_failed = 0;

	/* init user callbacks */
	TAILQ_INIT(&(eth_dev->callbacks));

	/*
	 * Set the default maximum frame size.
	 */
	eth_dev->data->max_frame_size = ETHER_MAX_LEN;

	/* Invoke PMD device initialization function */
	diag = (*eth_drv->eth_dev_init)(eth_drv, eth_dev);
	if (diag == 0)
		return (0);

	PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%u device_id=0x%x)"
			" failed\n", pci_drv->name,
			(unsigned) pci_dev->id.vendor_id,
			(unsigned) pci_dev->id.device_id);
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);
	/* Roll back the port slot taken by rte_eth_dev_allocate(). */
	nb_ports--;
	return diag;
}
222
/**
 * Register an Ethernet [Poll Mode] driver.
 *
 * Function invoked by the initialization function of an Ethernet driver
 * to simultaneously register itself as a PCI driver and as an Ethernet
 * Poll Mode Driver.
 * Invokes the rte_eal_pci_register() function to register the *pci_drv*
 * structure embedded in the *eth_drv* structure, after having stored the
 * address of the rte_eth_dev_init() function in the *devinit* field of
 * the *pci_drv* structure.
 * During the PCI probing phase, the rte_eth_dev_init() function is
 * invoked for each PCI [Ethernet device] matching the embedded PCI
 * identifiers provided by the driver.
 */
void
rte_eth_driver_register(struct eth_driver *eth_drv)
{
	/* Hook our probe callback before handing the driver to the EAL. */
	eth_drv->pci_drv.devinit = rte_eth_dev_init;
	rte_eal_pci_register(&eth_drv->pci_drv);
}
243
244 int
245 rte_eth_dev_socket_id(uint8_t port_id)
246 {
247         if (port_id >= nb_ports)
248                 return -1;
249         return rte_eth_devices[port_id].pci_dev->numa_node;
250 }
251
252 uint8_t
253 rte_eth_dev_count(void)
254 {
255         return (nb_ports);
256 }
257
/*
 * Resize the array of RX queue pointers to nb_queues entries.
 * Queues beyond the new count are first released through the PMD's
 * rx_queue_release callback; newly added slots are zeroed so they read
 * as "not yet set up".  Returns 0 on success or -ENOMEM.
 */
static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_rx_queues;
	void **rxq;
	unsigned i;

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

	if (dev->data->rx_queues == NULL) {
		/* First configuration: allocate a zeroed pointer array. */
		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
				sizeof(dev->data->rx_queues[0]) * nb_queues,
				CACHE_LINE_SIZE);
		if (dev->data->rx_queues == NULL) {
			dev->data->nb_rx_queues = 0;
			return -(ENOMEM);
		}
	} else {
		rxq = dev->data->rx_queues;

		/* Release queues that fall outside the new range. */
		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->rx_queue_release)(rxq[i]);
		rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
				CACHE_LINE_SIZE);
		if (rxq == NULL)
			return -(ENOMEM);

		/* Zero the newly added slots, if any. */
		if (nb_queues > old_nb_queues)
			memset(rxq + old_nb_queues, 0,
				sizeof(rxq[0]) * (nb_queues - old_nb_queues));

		dev->data->rx_queues = rxq;

	}
	dev->data->nb_rx_queues = nb_queues;
	return (0);
}
295
/*
 * Resize the array of TX queue pointers to nb_queues entries.
 * Mirrors rte_eth_dev_rx_queue_config(): out-of-range queues are
 * released via the PMD's tx_queue_release callback, new slots are
 * zeroed.  Returns 0 on success or -ENOMEM.
 */
static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_tx_queues;
	void **txq;
	unsigned i;

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

	if (dev->data->tx_queues == NULL) {
		/* First configuration: allocate a zeroed pointer array. */
		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
				sizeof(dev->data->tx_queues[0]) * nb_queues,
				CACHE_LINE_SIZE);
		if (dev->data->tx_queues == NULL) {
			dev->data->nb_tx_queues = 0;
			return -(ENOMEM);
		}
	} else {
		txq = dev->data->tx_queues;

		/* Release queues that fall outside the new range. */
		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->tx_queue_release)(txq[i]);
		txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
				CACHE_LINE_SIZE);
		if (txq == NULL)
			return -(ENOMEM);

		/* Zero the newly added slots, if any. */
		if (nb_queues > old_nb_queues)
			memset(txq + old_nb_queues, 0,
				sizeof(txq[0]) * (nb_queues - old_nb_queues));

		dev->data->tx_queues = txq;

	}
	dev->data->nb_tx_queues = nb_queues;
	return (0);
}
333
/*
 * Configure an Ethernet device.
 *
 * Validates the requested RX/TX queue counts and multi-queue-mode
 * constraints against the capabilities reported by the PMD, copies the
 * configuration into the device data, sizes the queue-pointer arrays,
 * and finally invokes the PMD's dev_configure hook.  The port must be
 * stopped; only the primary process may call this.
 * Returns 0 on success, a negative errno-style value on failure.
 */
int
rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	int diag;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup*/
	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

	if (port_id >= nb_ports || port_id >= RTE_MAX_ETHPORTS) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-EINVAL);
	}
	dev = &rte_eth_devices[port_id];

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	if (dev->data->dev_started) {
		PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n", port_id);
		return (-EBUSY);
	}

	/*
	 * Check that the numbers of RX and TX queues are not greater
	 * than the maximum number of RX and TX queues supported by the
	 * configured device.
	 */
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);
	if (nb_rx_q > dev_info.max_rx_queues) {
		PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
				port_id, nb_rx_q, dev_info.max_rx_queues);
		return (-EINVAL);
	}
	if (nb_rx_q == 0) {
		PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_q == 0\n", port_id);
		return (-EINVAL);
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
				port_id, nb_tx_q, dev_info.max_tx_queues);
		return (-EINVAL);
	}
	if (nb_tx_q == 0) {
		PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_q == 0\n", port_id);
		return (-EINVAL);
	}

	/* Copy the dev_conf parameter into the dev structure */
	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

	/*
	 * If jumbo frames are enabled, check that the maximum RX packet
	 * length is supported by the configured device.
	 */
	if (dev_conf->rxmode.jumbo_frame == 1) {
		if (dev_conf->rxmode.max_rx_pkt_len >
		    dev_info.max_rx_pktlen) {
			PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
				" > max valid value %u\n",
				port_id,
				(unsigned)dev_conf->rxmode.max_rx_pkt_len,
				(unsigned)dev_info.max_rx_pktlen);
			return (-EINVAL);
		}
		else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
			PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
				" < min valid value %u\n",
				port_id,
				(unsigned)dev_conf->rxmode.max_rx_pkt_len,
				(unsigned)ETHER_MIN_LEN);
			return (-EINVAL);
		}
	} else
		/* Use default value */
		dev->data->dev_conf.rxmode.max_rx_pkt_len = ETHER_MAX_LEN;

	/* For VMDq+DCB mode check our configuration before we go further */
	if (dev_conf->rxmode.mq_mode == ETH_VMDQ_DCB) {
		const struct rte_eth_vmdq_dcb_conf *conf;

		/* VMDq+DCB requires exactly the fixed RX queue count. */
		if (nb_rx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
			PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_rx_q "
					"!= %d\n",
					port_id, ETH_VMDQ_DCB_NUM_QUEUES);
			return (-EINVAL);
		}
		conf = &(dev_conf->rx_adv_conf.vmdq_dcb_conf);
		if (! (conf->nb_queue_pools == ETH_16_POOLS ||
		       conf->nb_queue_pools == ETH_32_POOLS)) {
		    PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
				    "nb_queue_pools must be %d or %d\n",
				    port_id, ETH_16_POOLS, ETH_32_POOLS);
		    return (-EINVAL);
		}
	}
	if (dev_conf->txmode.mq_mode == ETH_VMDQ_DCB_TX) {
		const struct rte_eth_vmdq_dcb_tx_conf *conf;

		/* VMDq+DCB requires exactly the fixed TX queue count. */
		if (nb_tx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
			PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_tx_q "
					"!= %d\n",
					port_id, ETH_VMDQ_DCB_NUM_QUEUES);
			return (-EINVAL);
		}
		conf = &(dev_conf->tx_adv_conf.vmdq_dcb_tx_conf);
		if (! (conf->nb_queue_pools == ETH_16_POOLS ||
		       conf->nb_queue_pools == ETH_32_POOLS)) {
			PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
				    "nb_queue_pools != %d or nb_queue_pools "
				    "!= %d\n",
				    port_id, ETH_16_POOLS, ETH_32_POOLS);
			return (-EINVAL);
		}
	}

	/* For DCB mode check our configuration before we go further */
	if (dev_conf->rxmode.mq_mode == ETH_DCB_RX) {
		const struct rte_eth_dcb_rx_conf *conf;

		if (nb_rx_q != ETH_DCB_NUM_QUEUES) {
			PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_rx_q "
					"!= %d\n",
					port_id, ETH_DCB_NUM_QUEUES);
			return (-EINVAL);
		}
		conf = &(dev_conf->rx_adv_conf.dcb_rx_conf);
		/* DCB supports only 4 or 8 traffic classes. */
		if (! (conf->nb_tcs == ETH_4_TCS ||
		       conf->nb_tcs == ETH_8_TCS)) {
			PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
				    "nb_tcs != %d or nb_tcs "
				    "!= %d\n",
				    port_id, ETH_4_TCS, ETH_8_TCS);
			return (-EINVAL);
		}
	}

	if (dev_conf->txmode.mq_mode == ETH_DCB_TX) {
		const struct rte_eth_dcb_tx_conf *conf;

		if (nb_tx_q != ETH_DCB_NUM_QUEUES) {
			PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_tx_q "
					"!= %d\n",
					port_id, ETH_DCB_NUM_QUEUES);
			return (-EINVAL);
		}
		conf = &(dev_conf->tx_adv_conf.dcb_tx_conf);
		/* DCB supports only 4 or 8 traffic classes. */
		if (! (conf->nb_tcs == ETH_4_TCS ||
		       conf->nb_tcs == ETH_8_TCS)) {
			PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
				    "nb_tcs != %d or nb_tcs "
				    "!= %d\n",
				    port_id, ETH_4_TCS, ETH_8_TCS);
			return (-EINVAL);
		}
	}

	/*
	 * Setup new number of RX/TX queues and reconfigure device.
	 */
	diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
				port_id, diag);
		return diag;
	}

	diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
				port_id, diag);
		/* Undo the RX queue allocation on failure. */
		rte_eth_dev_rx_queue_config(dev, 0);
		return diag;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
				port_id, diag);
		/* Undo both queue allocations on failure. */
		rte_eth_dev_rx_queue_config(dev, 0);
		rte_eth_dev_tx_queue_config(dev, 0);
		return diag;
	}

	return 0;
}
525
/*
 * Replay user configuration after a device (re)start: every non-zero
 * MAC address in the port's address array is re-added to the hardware
 * — into the default VMDq pool when SR-IOV is active — and the
 * promiscuous / all-multicast modes are re-applied.
 */
static void
rte_eth_dev_config_restore(uint8_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct ether_addr addr;
	uint16_t i;
	uint32_t pool = 0;

	dev = &rte_eth_devices[port_id];

	rte_eth_dev_info_get(port_id, &dev_info);

	/* With SR-IOV active, addresses are replayed into the default
	 * VMDq pool instead of pool 0. */
	if (RTE_ETH_DEV_SRIOV(dev).active)
		pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx;

	/* replay MAC address configuration */
	for (i = 0; i < dev_info.max_mac_addrs; i++) {
		addr = dev->data->mac_addrs[i];

		/* skip zero address */
		if (is_zero_ether_addr(&addr))
			continue;

		/* add address to the hardware */
		if  (*dev->dev_ops->mac_addr_add)
			(*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool);
		else {
			PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
					port_id);
			/* exit the loop but not return an error */
			break;
		}
	}

	/* replay promiscuous configuration */
	if (rte_eth_promiscuous_get(port_id) == 1)
		rte_eth_promiscuous_enable(port_id);
	else if (rte_eth_promiscuous_get(port_id) == 0)
		rte_eth_promiscuous_disable(port_id);

	/* replay allmulticast configuration */
	if (rte_eth_allmulticast_get(port_id) == 1)
		rte_eth_allmulticast_enable(port_id);
	else if (rte_eth_allmulticast_get(port_id) == 0)
		rte_eth_allmulticast_disable(port_id);
}
573
574 int
575 rte_eth_dev_start(uint8_t port_id)
576 {
577         struct rte_eth_dev *dev;
578         int diag;
579
580         /* This function is only safe when called from the primary process
581          * in a multi-process setup*/
582         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
583
584         if (port_id >= nb_ports) {
585                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
586                 return (-EINVAL);
587         }
588         dev = &rte_eth_devices[port_id];
589
590         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
591         diag = (*dev->dev_ops->dev_start)(dev);
592         if (diag == 0)
593                 dev->data->dev_started = 1;
594         else
595                 return diag;
596
597         rte_eth_dev_config_restore(port_id);
598
599         return 0;
600 }
601
602 void
603 rte_eth_dev_stop(uint8_t port_id)
604 {
605         struct rte_eth_dev *dev;
606
607         /* This function is only safe when called from the primary process
608          * in a multi-process setup*/
609         PROC_PRIMARY_OR_RET();
610
611         if (port_id >= nb_ports) {
612                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
613                 return;
614         }
615         dev = &rte_eth_devices[port_id];
616
617         FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
618         dev->data->dev_started = 0;
619         (*dev->dev_ops->dev_stop)(dev);
620 }
621
622 void
623 rte_eth_dev_close(uint8_t port_id)
624 {
625         struct rte_eth_dev *dev;
626
627         /* This function is only safe when called from the primary process
628          * in a multi-process setup*/
629         PROC_PRIMARY_OR_RET();
630
631         if (port_id >= nb_ports) {
632                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
633                 return;
634         }
635
636         dev = &rte_eth_devices[port_id];
637
638         FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
639         dev->data->dev_started = 0;
640         (*dev->dev_ops->dev_close)(dev);
641 }
642
/*
 * Set up an RX queue on a stopped port.  Validates the queue index and
 * checks that the supplied mempool carries pktmbuf private data whose
 * data-room size covers RTE_PKTMBUF_HEADROOM plus the device's minimum
 * RX buffer size, then delegates to the PMD's rx_queue_setup hook.
 * Primary process only.  Returns 0 on success or a negative errno-style
 * value (-EINVAL, -EBUSY, -ENOSPC, -ENOTSUP, or the PMD's result).
 */
int
rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
		       uint16_t nb_rx_desc, unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf,
		       struct rte_mempool *mp)
{
	struct rte_eth_dev *dev;
	struct rte_pktmbuf_pool_private *mbp_priv;
	struct rte_eth_dev_info dev_info;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup*/
	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-EINVAL);
	}
	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
		return (-EINVAL);
	}

	if (dev->data->dev_started) {
		PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n", port_id);
		return -EBUSY;
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);

	/*
	 * Check the size of the mbuf data buffer.
	 * This value must be provided in the private data of the memory pool.
	 * First check that the memory pool has a valid private data.
	 */
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);
	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
		PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
				mp->name, (int) mp->private_data_size,
				(int) sizeof(struct rte_pktmbuf_pool_private));
		return (-ENOSPC);
	}
	/* NOTE(review): this pointer arithmetic assumes the pool's private
	 * area sits immediately after struct rte_mempool — confirm against
	 * the rte_mempool layout. */
	mbp_priv = (struct rte_pktmbuf_pool_private *)
		((char *)mp + sizeof(struct rte_mempool));
	if ((uint32_t) (mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM) <
	    dev_info.min_rx_bufsize) {
		PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
				"(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
				"=%d)\n",
				mp->name,
				(int)mbp_priv->mbuf_data_room_size,
				(int)(RTE_PKTMBUF_HEADROOM +
				      dev_info.min_rx_bufsize),
				(int)RTE_PKTMBUF_HEADROOM,
				(int)dev_info.min_rx_bufsize);
		return (-EINVAL);
	}

	return (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
					       socket_id, rx_conf, mp);
}
707
708 int
709 rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
710                        uint16_t nb_tx_desc, unsigned int socket_id,
711                        const struct rte_eth_txconf *tx_conf)
712 {
713         struct rte_eth_dev *dev;
714
715         /* This function is only safe when called from the primary process
716          * in a multi-process setup*/
717         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
718
719         if (port_id >= RTE_MAX_ETHPORTS || port_id >= nb_ports) {
720                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
721                 return (-EINVAL);
722         }
723         dev = &rte_eth_devices[port_id];
724         if (tx_queue_id >= dev->data->nb_tx_queues) {
725                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
726                 return (-EINVAL);
727         }
728
729         if (dev->data->dev_started) {
730                 PMD_DEBUG_TRACE(
731                     "port %d must be stopped to allow configuration\n", port_id);
732                 return -EBUSY;
733         }
734
735         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
736         return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
737                                                socket_id, tx_conf);
738 }
739
740 void
741 rte_eth_promiscuous_enable(uint8_t port_id)
742 {
743         struct rte_eth_dev *dev;
744
745         if (port_id >= nb_ports) {
746                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
747                 return;
748         }
749         dev = &rte_eth_devices[port_id];
750
751         FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
752         (*dev->dev_ops->promiscuous_enable)(dev);
753         dev->data->promiscuous = 1;
754 }
755
756 void
757 rte_eth_promiscuous_disable(uint8_t port_id)
758 {
759         struct rte_eth_dev *dev;
760
761         if (port_id >= nb_ports) {
762                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
763                 return;
764         }
765         dev = &rte_eth_devices[port_id];
766
767         FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
768         dev->data->promiscuous = 0;
769         (*dev->dev_ops->promiscuous_disable)(dev);
770 }
771
772 int
773 rte_eth_promiscuous_get(uint8_t port_id)
774 {
775         struct rte_eth_dev *dev;
776
777         if (port_id >= nb_ports) {
778                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
779                 return -1;
780         }
781
782         dev = &rte_eth_devices[port_id];
783         return dev->data->promiscuous;
784 }
785
786 void
787 rte_eth_allmulticast_enable(uint8_t port_id)
788 {
789         struct rte_eth_dev *dev;
790
791         if (port_id >= nb_ports) {
792                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
793                 return;
794         }
795         dev = &rte_eth_devices[port_id];
796
797         FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
798         (*dev->dev_ops->allmulticast_enable)(dev);
799         dev->data->all_multicast = 1;
800 }
801
802 void
803 rte_eth_allmulticast_disable(uint8_t port_id)
804 {
805         struct rte_eth_dev *dev;
806
807         if (port_id >= nb_ports) {
808                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
809                 return;
810         }
811         dev = &rte_eth_devices[port_id];
812
813         FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
814         dev->data->all_multicast = 0;
815         (*dev->dev_ops->allmulticast_disable)(dev);
816 }
817
818 int
819 rte_eth_allmulticast_get(uint8_t port_id)
820 {
821         struct rte_eth_dev *dev;
822
823         if (port_id >= nb_ports) {
824                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
825                 return -1;
826         }
827
828         dev = &rte_eth_devices[port_id];
829         return dev->data->all_multicast;
830 }
831
/*
 * Atomically copy the cached link status of @dev into @link.
 *
 * The 64-bit compare-and-set reads the whole rte_eth_link structure in
 * one shot, so a concurrent writer (e.g. the link-state-change interrupt
 * handler) can never be observed half-updated.
 *
 * Returns 0 on success, -1 if the cmpset failed (the link status
 * changed between the read of *dst and the swap).
 *
 * NOTE(review): the casts below type-pun struct rte_eth_link to
 * uint64_t — assumes the struct is exactly 8 bytes; confirm against
 * its definition in rte_ethdev.h.
 */
static inline int
rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
845
846 void
847 rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
848 {
849         struct rte_eth_dev *dev;
850
851         if (port_id >= nb_ports) {
852                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
853                 return;
854         }
855         dev = &rte_eth_devices[port_id];
856         FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
857
858         if (dev->data->dev_conf.intr_conf.lsc != 0)
859                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
860         else {
861                 (*dev->dev_ops->link_update)(dev, 1);
862                 *eth_link = dev->data->dev_link;
863         }
864 }
865
866 void
867 rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
868 {
869         struct rte_eth_dev *dev;
870
871         if (port_id >= nb_ports) {
872                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
873                 return;
874         }
875         dev = &rte_eth_devices[port_id];
876         FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
877
878         if (dev->data->dev_conf.intr_conf.lsc != 0)
879                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
880         else {
881                 (*dev->dev_ops->link_update)(dev, 0);
882                 *eth_link = dev->data->dev_link;
883         }
884 }
885
886 void
887 rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
888 {
889         struct rte_eth_dev *dev;
890
891         if (port_id >= nb_ports) {
892                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
893                 return;
894         }
895         dev = &rte_eth_devices[port_id];
896
897         FUNC_PTR_OR_RET(*dev->dev_ops->stats_get);
898         (*dev->dev_ops->stats_get)(dev, stats);
899         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
900 }
901
902 void
903 rte_eth_stats_reset(uint8_t port_id)
904 {
905         struct rte_eth_dev *dev;
906
907         if (port_id >= nb_ports) {
908                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
909                 return;
910         }
911         dev = &rte_eth_devices[port_id];
912
913         FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
914         (*dev->dev_ops->stats_reset)(dev);
915 }
916
917
918 static int
919 set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
920                 uint8_t is_rx)
921 {
922         struct rte_eth_dev *dev;
923
924         if (port_id >= nb_ports) {
925                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
926                 return -ENODEV;
927         }
928         dev = &rte_eth_devices[port_id];
929
930         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
931         return (*dev->dev_ops->queue_stats_mapping_set)
932                         (dev, queue_id, stat_idx, is_rx);
933 }
934
935
936 int
937 rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
938                 uint8_t stat_idx)
939 {
940         return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
941                         STAT_QMAP_TX);
942 }
943
944
945 int
946 rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
947                 uint8_t stat_idx)
948 {
949         return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
950                         STAT_QMAP_RX);
951 }
952
953
954 void
955 rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
956 {
957         struct rte_eth_dev *dev;
958
959         if (port_id >= nb_ports) {
960                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
961                 return;
962         }
963         dev = &rte_eth_devices[port_id];
964
965         FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
966         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
967         dev_info->pci_dev = dev->pci_dev;
968         dev_info->driver_name = dev->driver->pci_drv.name;
969 }
970
971 void
972 rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
973 {
974         struct rte_eth_dev *dev;
975
976         if (port_id >= nb_ports) {
977                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
978                 return;
979         }
980         dev = &rte_eth_devices[port_id];
981         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
982 }
983
984 int
985 rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
986 {
987         struct rte_eth_dev *dev;
988
989         if (port_id >= nb_ports) {
990                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
991                 return (-ENODEV);
992         }
993         dev = &rte_eth_devices[port_id];
994         if (! (dev->data->dev_conf.rxmode.hw_vlan_filter)) {
995                 PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
996                 return (-ENOSYS);
997         }
998
999         if (vlan_id > 4095) {
1000                 PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
1001                                 port_id, (unsigned) vlan_id);
1002                 return (-EINVAL);
1003         }
1004         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
1005         (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
1006         return (0);
1007 }
1008
1009 int
1010 rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on)
1011 {
1012         struct rte_eth_dev *dev;
1013
1014         if (port_id >= nb_ports) {
1015                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1016                 return (-ENODEV);
1017         }
1018
1019         dev = &rte_eth_devices[port_id];
1020         if (rx_queue_id >= dev->data->nb_rx_queues) {
1021                 PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", port_id);
1022                 return (-EINVAL);
1023         }
1024
1025         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
1026         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
1027
1028         return (0);
1029 }
1030
1031 int
1032 rte_eth_dev_set_vlan_ether_type(uint8_t port_id, uint16_t tpid)
1033 {
1034         struct rte_eth_dev *dev;
1035
1036         if (port_id >= nb_ports) {
1037                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1038                 return (-ENODEV);
1039         }
1040
1041         dev = &rte_eth_devices[port_id];
1042         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
1043         (*dev->dev_ops->vlan_tpid_set)(dev, tpid);
1044
1045         return (0);
1046 }
1047
1048 int
1049 rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
1050 {
1051         struct rte_eth_dev *dev;
1052         int ret = 0;
1053         int mask = 0;
1054         int cur, org = 0;
1055         
1056         if (port_id >= nb_ports) {
1057                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1058                 return (-ENODEV);
1059         }
1060
1061         dev = &rte_eth_devices[port_id];
1062
1063         /*check which option changed by application*/
1064         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
1065         org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
1066         if (cur != org){
1067                 dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
1068                 mask |= ETH_VLAN_STRIP_MASK;
1069         }
1070         
1071         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
1072         org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
1073         if (cur != org){
1074                 dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
1075                 mask |= ETH_VLAN_FILTER_MASK;
1076         }
1077
1078         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
1079         org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
1080         if (cur != org){
1081                 dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
1082                 mask |= ETH_VLAN_EXTEND_MASK;
1083         }
1084
1085         /*no change*/
1086         if(mask == 0)
1087                 return ret;
1088         
1089         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
1090         (*dev->dev_ops->vlan_offload_set)(dev, mask);
1091
1092         return ret;
1093 }
1094
1095 int
1096 rte_eth_dev_get_vlan_offload(uint8_t port_id)
1097 {
1098         struct rte_eth_dev *dev;
1099         int ret = 0;
1100
1101         if (port_id >= nb_ports) {
1102                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1103                 return (-ENODEV);
1104         }
1105
1106         dev = &rte_eth_devices[port_id];
1107
1108         if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1109                 ret |= ETH_VLAN_STRIP_OFFLOAD ;
1110
1111         if (dev->data->dev_conf.rxmode.hw_vlan_filter)
1112                 ret |= ETH_VLAN_FILTER_OFFLOAD ;
1113
1114         if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1115                 ret |= ETH_VLAN_EXTEND_OFFLOAD ;
1116
1117         return ret;
1118 }
1119
1120
1121 int
1122 rte_eth_dev_fdir_add_signature_filter(uint8_t port_id,
1123                                       struct rte_fdir_filter *fdir_filter,
1124                                       uint8_t queue)
1125 {
1126         struct rte_eth_dev *dev;
1127
1128         if (port_id >= nb_ports) {
1129                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1130                 return (-ENODEV);
1131         }
1132
1133         dev = &rte_eth_devices[port_id];
1134
1135         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
1136                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1137                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1138                 return (-ENOSYS);
1139         }
1140
1141         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1142              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1143             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1144                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1145                                 "None l4type, source & destinations ports " \
1146                                 "should be null!\n");
1147                 return (-EINVAL);
1148         }
1149
1150         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_signature_filter, -ENOTSUP);
1151         return (*dev->dev_ops->fdir_add_signature_filter)(dev, fdir_filter,
1152                                                                 queue);
1153 }
1154
1155 int
1156 rte_eth_dev_fdir_update_signature_filter(uint8_t port_id,
1157                                          struct rte_fdir_filter *fdir_filter,
1158                                          uint8_t queue)
1159 {
1160         struct rte_eth_dev *dev;
1161
1162         if (port_id >= nb_ports) {
1163                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1164                 return (-ENODEV);
1165         }
1166
1167         dev = &rte_eth_devices[port_id];
1168
1169         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
1170                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1171                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1172                 return (-ENOSYS);
1173         }
1174
1175         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1176              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1177             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1178                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1179                                 "None l4type, source & destinations ports " \
1180                                 "should be null!\n");
1181                 return (-EINVAL);
1182         }
1183
1184         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_signature_filter, -ENOTSUP);
1185         return (*dev->dev_ops->fdir_update_signature_filter)(dev, fdir_filter,
1186                                                                 queue);
1187
1188 }
1189
1190 int
1191 rte_eth_dev_fdir_remove_signature_filter(uint8_t port_id,
1192                                          struct rte_fdir_filter *fdir_filter)
1193 {
1194         struct rte_eth_dev *dev;
1195
1196         if (port_id >= nb_ports) {
1197                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1198                 return (-ENODEV);
1199         }
1200
1201         dev = &rte_eth_devices[port_id];
1202
1203         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
1204                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1205                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1206                 return (-ENOSYS);
1207         }
1208
1209         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1210              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1211             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1212                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1213                                 "None l4type source & destinations ports " \
1214                                 "should be null!\n");
1215                 return (-EINVAL);
1216         }
1217
1218         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_signature_filter, -ENOTSUP);
1219         return (*dev->dev_ops->fdir_remove_signature_filter)(dev, fdir_filter);
1220 }
1221
1222 int
1223 rte_eth_dev_fdir_get_infos(uint8_t port_id, struct rte_eth_fdir *fdir)
1224 {
1225         struct rte_eth_dev *dev;
1226
1227         if (port_id >= nb_ports) {
1228                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1229                 return (-ENODEV);
1230         }
1231
1232         dev = &rte_eth_devices[port_id];
1233         if (! (dev->data->dev_conf.fdir_conf.mode)) {
1234                 PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
1235                 return (-ENOSYS);
1236         }
1237
1238         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_infos_get, -ENOTSUP);
1239
1240         (*dev->dev_ops->fdir_infos_get)(dev, fdir);
1241         return (0);
1242 }
1243
1244 int
1245 rte_eth_dev_fdir_add_perfect_filter(uint8_t port_id,
1246                                     struct rte_fdir_filter *fdir_filter,
1247                                     uint16_t soft_id, uint8_t queue,
1248                                     uint8_t drop)
1249 {
1250         struct rte_eth_dev *dev;
1251
1252         if (port_id >= nb_ports) {
1253                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1254                 return (-ENODEV);
1255         }
1256
1257         dev = &rte_eth_devices[port_id];
1258
1259         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
1260                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1261                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1262                 return (-ENOSYS);
1263         }
1264
1265         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1266              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1267             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1268                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1269                                 "None l4type, source & destinations ports " \
1270                                 "should be null!\n");
1271                 return (-EINVAL);
1272         }
1273
1274         /* For now IPv6 is not supported with perfect filter */
1275         if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
1276                 return (-ENOTSUP);
1277
1278         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_perfect_filter, -ENOTSUP);
1279         return (*dev->dev_ops->fdir_add_perfect_filter)(dev, fdir_filter,
1280                                                                 soft_id, queue,
1281                                                                 drop);
1282 }
1283
1284 int
1285 rte_eth_dev_fdir_update_perfect_filter(uint8_t port_id,
1286                                        struct rte_fdir_filter *fdir_filter,
1287                                        uint16_t soft_id, uint8_t queue,
1288                                        uint8_t drop)
1289 {
1290         struct rte_eth_dev *dev;
1291
1292         if (port_id >= nb_ports) {
1293                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1294                 return (-ENODEV);
1295         }
1296
1297         dev = &rte_eth_devices[port_id];
1298
1299         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
1300                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1301                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1302                 return (-ENOSYS);
1303         }
1304
1305         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1306              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1307             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1308                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1309                                 "None l4type, source & destinations ports " \
1310                                 "should be null!\n");
1311                 return (-EINVAL);
1312         }
1313
1314         /* For now IPv6 is not supported with perfect filter */
1315         if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
1316                 return (-ENOTSUP);
1317
1318         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_perfect_filter, -ENOTSUP);
1319         return (*dev->dev_ops->fdir_update_perfect_filter)(dev, fdir_filter,
1320                                                         soft_id, queue, drop);
1321 }
1322
1323 int
1324 rte_eth_dev_fdir_remove_perfect_filter(uint8_t port_id,
1325                                        struct rte_fdir_filter *fdir_filter,
1326                                        uint16_t soft_id)
1327 {
1328         struct rte_eth_dev *dev;
1329
1330         if (port_id >= nb_ports) {
1331                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1332                 return (-ENODEV);
1333         }
1334
1335         dev = &rte_eth_devices[port_id];
1336
1337         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
1338                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1339                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1340                 return (-ENOSYS);
1341         }
1342
1343         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1344              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1345             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1346                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1347                                 "None l4type, source & destinations ports " \
1348                                 "should be null!\n");
1349                 return (-EINVAL);
1350         }
1351
1352         /* For now IPv6 is not supported with perfect filter */
1353         if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
1354                 return (-ENOTSUP);
1355
1356         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_perfect_filter, -ENOTSUP);
1357         return (*dev->dev_ops->fdir_remove_perfect_filter)(dev, fdir_filter,
1358                                                                 soft_id);
1359 }
1360
1361 int
1362 rte_eth_dev_fdir_set_masks(uint8_t port_id, struct rte_fdir_masks *fdir_mask)
1363 {
1364         struct rte_eth_dev *dev;
1365
1366         if (port_id >= nb_ports) {
1367                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1368                 return (-ENODEV);
1369         }
1370
1371         dev = &rte_eth_devices[port_id];
1372         if (! (dev->data->dev_conf.fdir_conf.mode)) {
1373                 PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
1374                 return (-ENOSYS);
1375         }
1376
1377         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_set_masks, -ENOTSUP);
1378         return (*dev->dev_ops->fdir_set_masks)(dev, fdir_mask);
1379 }
1380
1381 int
1382 rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
1383 {
1384         struct rte_eth_dev *dev;
1385
1386         if (port_id >= nb_ports) {
1387                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1388                 return (-ENODEV);
1389         }
1390
1391         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
1392                 PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
1393                 return (-EINVAL);
1394         }
1395
1396         dev = &rte_eth_devices[port_id];
1397         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
1398         return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
1399 }
1400
1401 int
1402 rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc_conf)
1403 {
1404         struct rte_eth_dev *dev;
1405
1406         if (port_id >= nb_ports) {
1407                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1408                 return (-ENODEV);
1409         }
1410
1411         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
1412                 PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
1413                 return (-EINVAL);
1414         }
1415
1416         dev = &rte_eth_devices[port_id];
1417         /* High water, low water validation are device specific */
1418         if  (*dev->dev_ops->priority_flow_ctrl_set)
1419                 return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
1420         return (-ENOTSUP);
1421 }
1422
1423 int
1424 rte_eth_dev_rss_reta_update(uint8_t port_id, struct rte_eth_rss_reta *reta_conf)
1425 {
1426         struct rte_eth_dev *dev;
1427         uint8_t i,j;
1428
1429         if (port_id >= nb_ports) {
1430                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1431                 return (-ENODEV);
1432         }
1433
1434         /* Invalid mask bit(s) setting */
1435         if ((reta_conf->mask_lo == 0) && (reta_conf->mask_hi == 0)) {
1436                 PMD_DEBUG_TRACE("Invalid update mask bits for port=%d\n",port_id);
1437                 return (-EINVAL);
1438         }
1439
1440         if (reta_conf->mask_lo != 0) {
1441                 for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES/2; i++) {
1442                         if ((reta_conf->mask_lo & (1ULL << i)) &&
1443                                 (reta_conf->reta[i] >= ETH_RSS_RETA_MAX_QUEUE)) {
1444                                 PMD_DEBUG_TRACE("RETA hash index output"
1445                                         "configration for port=%d,invalid"
1446                                         "queue=%d\n",port_id,reta_conf->reta[i]);
1447
1448                                 return (-EINVAL);
1449                         } 
1450                 }
1451         }
1452
1453         if (reta_conf->mask_hi != 0) {
1454                 for (i = 0; i< ETH_RSS_RETA_NUM_ENTRIES/2; i++) {       
1455                         j = (uint8_t)(i + ETH_RSS_RETA_NUM_ENTRIES/2);
1456
1457                         /* Check if the max entry >= 128 */
1458                         if ((reta_conf->mask_hi & (1ULL << i)) && 
1459                                 (reta_conf->reta[j] >= ETH_RSS_RETA_MAX_QUEUE)) {
1460                                 PMD_DEBUG_TRACE("RETA hash index output"
1461                                         "configration for port=%d,invalid"
1462                                         "queue=%d\n",port_id,reta_conf->reta[j]);
1463
1464                                 return (-EINVAL);
1465                         }
1466                 }
1467         }
1468
1469         dev = &rte_eth_devices[port_id];
1470
1471         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
1472         return (*dev->dev_ops->reta_update)(dev, reta_conf);
1473 }
1474
1475 int 
1476 rte_eth_dev_rss_reta_query(uint8_t port_id, struct rte_eth_rss_reta *reta_conf)
1477 {
1478         struct rte_eth_dev *dev;
1479         
1480         if (port_id >= nb_ports) {
1481                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1482                 return (-ENODEV);
1483         }
1484
1485         if((reta_conf->mask_lo == 0) && (reta_conf->mask_hi == 0)) {
1486                 PMD_DEBUG_TRACE("Invalid update mask bits for the port=%d\n",port_id);
1487                 return (-EINVAL);
1488         }
1489
1490         dev = &rte_eth_devices[port_id];
1491         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
1492         return (*dev->dev_ops->reta_query)(dev, reta_conf);
1493 }
1494
1495 int
1496 rte_eth_led_on(uint8_t port_id)
1497 {
1498         struct rte_eth_dev *dev;
1499
1500         if (port_id >= nb_ports) {
1501                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1502                 return (-ENODEV);
1503         }
1504
1505         dev = &rte_eth_devices[port_id];
1506         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
1507         return ((*dev->dev_ops->dev_led_on)(dev));
1508 }
1509
1510 int
1511 rte_eth_led_off(uint8_t port_id)
1512 {
1513         struct rte_eth_dev *dev;
1514
1515         if (port_id >= nb_ports) {
1516                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1517                 return (-ENODEV);
1518         }
1519
1520         dev = &rte_eth_devices[port_id];
1521         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
1522         return ((*dev->dev_ops->dev_led_off)(dev));
1523 }
1524
1525 /*
1526  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
1527  * an empty spot.
1528  */
1529 static inline int
1530 get_mac_addr_index(uint8_t port_id, struct ether_addr *addr)
1531 {
1532         struct rte_eth_dev_info dev_info;
1533         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1534         unsigned i;
1535
1536         rte_eth_dev_info_get(port_id, &dev_info);
1537
1538         for (i = 0; i < dev_info.max_mac_addrs; i++)
1539                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
1540                         return i;
1541
1542         return -1;
1543 }
1544
/* All-zero Ethernet address: marks a free slot in dev->data->mac_addrs[]. */
static struct ether_addr null_mac_addr = {{0, 0, 0, 0, 0, 0}};
1546
1547 int
1548 rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
1549                 uint32_t pool)
1550 {
1551         struct rte_eth_dev *dev;
1552         int index;
1553
1554         if (port_id >= nb_ports) {
1555                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1556                 return (-ENODEV);
1557         }
1558         dev = &rte_eth_devices[port_id];
1559         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
1560
1561         if (is_zero_ether_addr(addr)) {
1562                 PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n", port_id);
1563                 return (-EINVAL);
1564         }
1565
1566         /* Check if it's already there, and do nothing */
1567         index = get_mac_addr_index(port_id, addr);
1568         if (index >= 0)
1569                 return 0;
1570
1571         index = get_mac_addr_index(port_id, &null_mac_addr);
1572         if (index < 0) {
1573                 PMD_DEBUG_TRACE("port %d: MAC address array full\n", port_id);
1574                 return (-ENOSPC);
1575         }
1576
1577         /* Update NIC */
1578         (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
1579
1580         /* Update address in NIC data structure */
1581         ether_addr_copy(addr, &dev->data->mac_addrs[index]);
1582
1583         return 0;
1584 }
1585
1586 int
1587 rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
1588 {
1589         struct rte_eth_dev *dev;
1590         int index;
1591
1592         if (port_id >= nb_ports) {
1593                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1594                 return (-ENODEV);
1595         }
1596         dev = &rte_eth_devices[port_id];
1597         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
1598
1599         index = get_mac_addr_index(port_id, addr);
1600         if (index == 0) {
1601                 PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
1602                 return (-EADDRINUSE);
1603         } else if (index < 0)
1604                 return 0;  /* Do nothing if address wasn't found */
1605
1606         /* Update NIC */
1607         (*dev->dev_ops->mac_addr_remove)(dev, index);
1608
1609         /* Update address in NIC data structure */
1610         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
1611
1612         return 0;
1613 }
1614
1615 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
1616 uint16_t
1617 rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
1618                  struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1619 {
1620         struct rte_eth_dev *dev;
1621
1622         if (port_id >= nb_ports) {
1623                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1624                 return 0;
1625         }
1626         dev = &rte_eth_devices[port_id];
1627         FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, -ENOTSUP);
1628         if (queue_id >= dev->data->nb_rx_queues) {
1629                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
1630                 return 0;
1631         }
1632         return (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
1633                                                 rx_pkts, nb_pkts);
1634 }
1635
1636 uint16_t
1637 rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
1638                  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1639 {
1640         struct rte_eth_dev *dev;
1641
1642         if (port_id >= nb_ports) {
1643                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1644                 return 0;
1645         }
1646         dev = &rte_eth_devices[port_id];
1647
1648         FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, -ENOTSUP);
1649         if (queue_id >= dev->data->nb_tx_queues) {
1650                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
1651                 return 0;
1652         }
1653         return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id],
1654                                                 tx_pkts, nb_pkts);
1655 }
1656
1657 uint32_t
1658 rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
1659 {
1660         struct rte_eth_dev *dev;
1661
1662         if (port_id >= nb_ports) {
1663                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1664                 return 0;
1665         }
1666         dev = &rte_eth_devices[port_id];
1667         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, -ENOTSUP);
1668         return (*dev->dev_ops->rx_queue_count)(dev, queue_id);  
1669 }
1670 #endif
1671
1672 int
1673 rte_eth_dev_callback_register(uint8_t port_id,
1674                         enum rte_eth_event_type event,
1675                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
1676 {
1677         struct rte_eth_dev *dev;
1678         struct rte_eth_dev_callback *user_cb;
1679
1680         if (!cb_fn)
1681                 return (-EINVAL);
1682         if (port_id >= nb_ports) {
1683                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1684                 return (-EINVAL);
1685         }
1686
1687         dev = &rte_eth_devices[port_id];
1688         rte_spinlock_lock(&rte_eth_dev_cb_lock);
1689
1690         TAILQ_FOREACH(user_cb, &(dev->callbacks), next) {
1691                 if (user_cb->cb_fn == cb_fn &&
1692                         user_cb->cb_arg == cb_arg &&
1693                         user_cb->event == event) {
1694                         break;
1695                 }
1696         }
1697
1698         /* create a new callback. */
1699         if (user_cb == NULL && (user_cb = rte_zmalloc("INTR_USER_CALLBACK",
1700                         sizeof(struct rte_eth_dev_callback), 0)) != NULL) {
1701                 user_cb->cb_fn = cb_fn;
1702                 user_cb->cb_arg = cb_arg;
1703                 user_cb->event = event;
1704                 TAILQ_INSERT_TAIL(&(dev->callbacks), user_cb, next);
1705         }
1706
1707         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
1708         return ((user_cb == NULL) ? -ENOMEM : 0);
1709 }
1710
1711 int
1712 rte_eth_dev_callback_unregister(uint8_t port_id,
1713                         enum rte_eth_event_type event,
1714                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
1715 {
1716         int ret;
1717         struct rte_eth_dev *dev;
1718         struct rte_eth_dev_callback *cb, *next;
1719
1720         if (!cb_fn)
1721                 return (-EINVAL);
1722         if (port_id >= nb_ports) {
1723                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1724                 return (-EINVAL);
1725         }
1726
1727         dev = &rte_eth_devices[port_id];
1728         rte_spinlock_lock(&rte_eth_dev_cb_lock);
1729
1730         ret = 0;
1731         for (cb = TAILQ_FIRST(&dev->callbacks); cb != NULL; cb = next) {
1732
1733                 next = TAILQ_NEXT(cb, next);
1734
1735                 if (cb->cb_fn != cb_fn || cb->event != event ||
1736                                 (cb->cb_arg != (void *)-1 &&
1737                                 cb->cb_arg != cb_arg))
1738                         continue;
1739
1740                 /*
1741                  * if this callback is not executing right now,
1742                  * then remove it.
1743                  */
1744                 if (cb->active == 0) {
1745                         TAILQ_REMOVE(&(dev->callbacks), cb, next);
1746                         rte_free(cb);
1747                 } else {
1748                         ret = -EAGAIN;
1749                 }
1750         }
1751
1752         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
1753         return (ret);
1754 }
1755
/*
 * Invoke every registered user callback for the given event on a
 * device (internal helper, called from interrupt/event context).
 *
 * The callback list lock cannot be held across the user callback
 * invocation (the callback could e.g. try to unregister itself), so
 * each matching entry is copied into a local struct, flagged active,
 * and invoked with the lock dropped. The active flag lets
 * rte_eth_dev_callback_unregister() detect an in-flight callback and
 * return -EAGAIN instead of freeing it out from under us.
 */
void
_rte_eth_dev_callback_process(struct rte_eth_dev *dev,
	enum rte_eth_event_type event)
{
	struct rte_eth_dev_callback *cb_lst;
	/* Local copy used for the call once the lock is released. */
	struct rte_eth_dev_callback dev_cb;

	rte_spinlock_lock(&rte_eth_dev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->callbacks), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		/* Snapshot the entry while still holding the lock. */
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		/* Drop the lock around the user callback; it may call
		 * back into the callback register/unregister API. */
		rte_spinlock_unlock(&rte_eth_dev_cb_lock);
		dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
						dev_cb.cb_arg);
		rte_spinlock_lock(&rte_eth_dev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
}