ethdev: minor changes
[dpdk.git] lib/librte_ether/rte_ethdev.c
1 /*-
2  *   BSD LICENSE
3  * 
4  *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  * 
7  *   Redistribution and use in source and binary forms, with or without 
8  *   modification, are permitted provided that the following conditions 
9  *   are met:
10  * 
11  *     * Redistributions of source code must retain the above copyright 
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright 
14  *       notice, this list of conditions and the following disclaimer in 
15  *       the documentation and/or other materials provided with the 
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its 
18  *       contributors may be used to endorse or promote products derived 
19  *       from this software without specific prior written permission.
20  * 
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  * 
33  */
34
35 #include <sys/types.h>
36 #include <sys/queue.h>
37 #include <ctype.h>
38 #include <stdio.h>
39 #include <stdlib.h>
40 #include <string.h>
41 #include <stdarg.h>
42 #include <errno.h>
43 #include <stdint.h>
44 #include <inttypes.h>
45
46 #include <rte_byteorder.h>
47 #include <rte_log.h>
48 #include <rte_debug.h>
49 #include <rte_interrupts.h>
50 #include <rte_pci.h>
51 #include <rte_memory.h>
52 #include <rte_memcpy.h>
53 #include <rte_memzone.h>
54 #include <rte_launch.h>
55 #include <rte_tailq.h>
56 #include <rte_eal.h>
57 #include <rte_per_lcore.h>
58 #include <rte_lcore.h>
59 #include <rte_atomic.h>
60 #include <rte_branch_prediction.h>
61 #include <rte_common.h>
62 #include <rte_ring.h>
63 #include <rte_mempool.h>
64 #include <rte_malloc.h>
65 #include <rte_mbuf.h>
66 #include <rte_errno.h>
67 #include <rte_spinlock.h>
68
69 #include "rte_ether.h"
70 #include "rte_ethdev.h"
71
72 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
73 #define PMD_DEBUG_TRACE(fmt, args...) do {                        \
74                 RTE_LOG(ERR, PMD, "%s: " fmt, __func__, ## args); \
75         } while (0)
76 #else
77 #define PMD_DEBUG_TRACE(fmt, args...)
78 #endif
79
80 /* Macros to restrict API functions to the primary process instance only */
81 #define PROC_PRIMARY_OR_ERR_RET(retval) do { \
82         if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
83                 PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
84                 return (retval); \
85         } \
86 } while(0)
87 #define PROC_PRIMARY_OR_RET() do { \
88         if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
89                 PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
90                 return; \
91         } \
92 } while(0)
93
94 /* Macros to check for invalid function pointers in dev_ops structure */
95 #define FUNC_PTR_OR_ERR_RET(func, retval) do { \
96         if ((func) == NULL) { \
97                 PMD_DEBUG_TRACE("Function not supported\n"); \
98                 return (retval); \
99         } \
100 } while(0)
101 #define FUNC_PTR_OR_RET(func) do { \
102         if ((func) == NULL) { \
103                 PMD_DEBUG_TRACE("Function not supported\n"); \
104                 return; \
105         } \
106 } while(0)
107
108 static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
109 struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
110 static struct rte_eth_dev_data *rte_eth_dev_data = NULL;
111 static uint8_t nb_ports = 0;
112
113 /* spinlock for eth device callbacks */
114 static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
115
116 /**
117  * The user application callback description.
118  *
119  * It contains the callback address registered by the user application,
120  * the pointer to the callback's parameter, and the event type.
121  */
122 struct rte_eth_dev_callback {
123         TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
124         rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
125         void *cb_arg;                           /**< Parameter for callback */
126         enum rte_eth_event_type event;          /**< Interrupt event type */
127 };
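/*
 * Illustrative sketch (not part of the library): how an application might
 * register a link-status-change callback through
 * rte_eth_dev_callback_register(), assuming the RTE_ETH_EVENT_INTR_LSC
 * event defined in rte_ethdev.h. The callback name and its body are
 * hypothetical.
 *
 *     static void
 *     lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
 *                        void *param)
 *     {
 *             struct rte_eth_link link;
 *
 *             (void)param;                       // unused in this example
 *             if (type == RTE_ETH_EVENT_INTR_LSC) {
 *                     rte_eth_link_get_nowait(port_id, &link);
 *                     printf("Port %u link is %s\n", (unsigned)port_id,
 *                            link.link_status ? "up" : "down");
 *             }
 *     }
 *
 *     // At initialization time, once the port has been configured:
 *     rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *                                   lsc_event_callback, NULL);
 */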
128
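/*
 * Allocate (primary process) or look up (secondary process) the shared
 * memzone holding the per-port rte_eth_dev_data array, so that every
 * process of a multi-process application sees the same port data.
 */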
129 static inline void
130 rte_eth_dev_data_alloc(void)
131 {
132         const unsigned flags = 0;
133         const struct rte_memzone *mz;
134
135         if (rte_eal_process_type() == RTE_PROC_PRIMARY){
136                 mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
137                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
138                                 rte_socket_id(), flags);
139         } else
140                 mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
141         if (mz == NULL)
142                 rte_panic("Cannot allocate memzone for ethernet port data\n");
143
144         rte_eth_dev_data = mz->addr;
145         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
146                 memset(rte_eth_dev_data, 0,
147                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
148 }
149
150 static inline struct rte_eth_dev *
151 rte_eth_dev_allocate(void)
152 {
153         struct rte_eth_dev *eth_dev;
154
155         if (nb_ports == RTE_MAX_ETHPORTS) {
156                 PMD_DEBUG_TRACE("Reached maximum number of ethernet ports\n");
157                 return NULL;
158         }
159
160         if (rte_eth_dev_data == NULL)
161                 rte_eth_dev_data_alloc();
162
163         eth_dev = &rte_eth_devices[nb_ports];
164         eth_dev->data = &rte_eth_dev_data[nb_ports];
165         eth_dev->data->port_id = nb_ports++;
166         return eth_dev;
167 }
168
169 static int
170 rte_eth_dev_init(struct rte_pci_driver *pci_drv,
171                  struct rte_pci_device *pci_dev)
172 {
173         struct eth_driver    *eth_drv;
174         struct rte_eth_dev *eth_dev;
175         int diag;
176
177         eth_drv = (struct eth_driver *)pci_drv;
178
179         eth_dev = rte_eth_dev_allocate();
180         if (eth_dev == NULL)
181                 return -ENOMEM;
182
183         if (rte_eal_process_type() == RTE_PROC_PRIMARY){
184                 eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
185                                   eth_drv->dev_private_size,
186                                   CACHE_LINE_SIZE);
187                 if (eth_dev->data->dev_private == NULL)
188                         rte_panic("Cannot allocate memzone for private port data\n");
189         }
190         eth_dev->pci_dev = pci_dev;
191         eth_dev->driver = eth_drv;
192         eth_dev->data->rx_mbuf_alloc_failed = 0;
193
194         /* init user callbacks */
195         TAILQ_INIT(&(eth_dev->callbacks));
196
197         /*
198          * Set the default maximum frame size.
199          */
200         eth_dev->data->max_frame_size = ETHER_MAX_LEN;
201
202         /* Invoke PMD device initialization function */
203         diag = (*eth_drv->eth_dev_init)(eth_drv, eth_dev);
204         if (diag == 0)
205                 return (0);
206
207         PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%x device_id=0x%x)"
208                         " failed\n", pci_drv->name,
209                         (unsigned) pci_dev->id.vendor_id,
210                         (unsigned) pci_dev->id.device_id);
211         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
212                 rte_free(eth_dev->data->dev_private);
213         nb_ports--;
214         return diag;
215 }
216
217 /**
218  * Register an Ethernet [Poll Mode] driver.
219  *
220  * Function invoked by the initialization function of an Ethernet driver
221  * to simultaneously register itself as a PCI driver and as an Ethernet
222  * Poll Mode Driver.
223  * Invokes the rte_eal_pci_register() function to register the *pci_drv*
224  * structure embedded in the *eth_drv* structure, after having stored the
225  * address of the rte_eth_dev_init() function in the *devinit* field of
226  * the *pci_drv* structure.
227  * During the PCI probing phase, the rte_eth_dev_init() function is
228  * invoked for each PCI [Ethernet device] matching the embedded PCI
229  * identifiers provided by the driver.
230  */
231 void
232 rte_eth_driver_register(struct eth_driver *eth_drv)
233 {
234         eth_drv->pci_drv.devinit = rte_eth_dev_init;
235         rte_eal_pci_register(&eth_drv->pci_drv);
236 }
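/*
 * Illustrative sketch (not part of the library): a PMD typically defines a
 * static struct eth_driver and hands it to rte_eth_driver_register() from
 * its init function. All names below (rte_my_pmd, my_pmd_*, my_pci_id_map)
 * are hypothetical, and only the fields referenced in this file plus the
 * usual PCI id table are shown.
 *
 *     static struct eth_driver rte_my_pmd = {
 *             {
 *                     .name = "rte_my_pmd",
 *                     .id_table = my_pci_id_map,   // PCI ids handled by the PMD
 *             },
 *             .eth_dev_init = my_pmd_dev_init,     // per-device init hook
 *             .dev_private_size = sizeof(struct my_pmd_adapter),
 *     };
 *
 *     int
 *     rte_my_pmd_init(void)
 *     {
 *             rte_eth_driver_register(&rte_my_pmd);
 *             return 0;
 *     }
 */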
237
238 uint8_t
239 rte_eth_dev_count(void)
240 {
241         return (nb_ports);
242 }
243
244 int
245 rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
246                       const struct rte_eth_conf *dev_conf)
247 {
248         struct rte_eth_dev *dev;
249         struct rte_eth_dev_info dev_info;
250         int diag;
251
252         /* This function is only safe when called from the primary process
253          * in a multi-process setup*/
254         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
255
256         if (port_id >= nb_ports || port_id >= RTE_MAX_ETHPORTS) {
257                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
258                 return (-EINVAL);
259         }
260         dev = &rte_eth_devices[port_id];
261
262         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
263         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
264
265         if (dev->data->dev_started) {
266                 PMD_DEBUG_TRACE(
267                     "port %d must be stopped to allow configuration\n", port_id);
268                 return (-EBUSY);
269         }
270
271         /*
272          * Check that the numbers of RX and TX queues are not greater
273          * than the maximum number of RX and TX queues supported by the
274          * configured device.
275          */
276         (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
277         if (nb_rx_q > dev_info.max_rx_queues) {
278                 PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
279                                 port_id, nb_rx_q, dev_info.max_rx_queues);
280                 return (-EINVAL);
281         }
282         if (nb_rx_q == 0) {
283                 PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_q == 0\n", port_id);
284                 return (-EINVAL);
285         }
286
287         if (nb_tx_q > dev_info.max_tx_queues) {
288                 PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
289                                 port_id, nb_tx_q, dev_info.max_tx_queues);
290                 return (-EINVAL);
291         }
292         if (nb_tx_q == 0) {
293                 PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_q == 0\n", port_id);
294                 return (-EINVAL);
295         }
296
297         /* Copy the dev_conf parameter into the dev structure */
298         memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
299
300         /*
301          * If jumbo frames are enabled, check that the maximum RX packet
302          * length is supported by the configured device.
303          */
304         if (dev_conf->rxmode.jumbo_frame == 1) {
305                 if (dev_conf->rxmode.max_rx_pkt_len >
306                     dev_info.max_rx_pktlen) {
307                         PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
308                                 " > max valid value %u\n",
309                                 port_id,
310                                 (unsigned)dev_conf->rxmode.max_rx_pkt_len,
311                                 (unsigned)dev_info.max_rx_pktlen);
312                         return (-EINVAL);
313                 }
314         } else
315                 /* Use default value */
316                 dev->data->dev_conf.rxmode.max_rx_pkt_len = ETHER_MAX_LEN;
317
318         /* For VMDQ+DCB mode, check our configuration before we go further */
319         if (dev_conf->rxmode.mq_mode == ETH_VMDQ_DCB) {
320                 const struct rte_eth_vmdq_dcb_conf *conf;
321
322                 if (nb_rx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
323                         PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_rx_q "
324                                         "!= %d\n",
325                                         port_id, ETH_VMDQ_DCB_NUM_QUEUES);
326                         return (-EINVAL);
327                 }
328                 conf = &(dev_conf->rx_adv_conf.vmdq_dcb_conf);
329                 if (! (conf->nb_queue_pools == ETH_16_POOLS ||
330                        conf->nb_queue_pools == ETH_32_POOLS)) {
331                     PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
332                                     "nb_queue_pools must be %d or %d\n",
333                                     port_id, ETH_16_POOLS, ETH_32_POOLS);
334                     return (-EINVAL);
335                 }
336         }
337
338         diag = (*dev->dev_ops->dev_configure)(dev, nb_rx_q, nb_tx_q);
339         if (diag != 0) {
340                 rte_free(dev->data->rx_queues);
341                 rte_free(dev->data->tx_queues);
342         }
343         return diag;
344 }
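/*
 * Illustrative sketch (not part of the library): the usual call sequence an
 * application follows for one port. Error handling is omitted; mb_pool is
 * assumed to be an already created mbuf mempool (see the sketch after
 * rte_eth_rx_queue_setup() below) and rx_conf/tx_conf are descriptor ring
 * settings chosen by the application.
 *
 *     static const struct rte_eth_conf port_conf;   // fields left at zero
 *
 *     ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
 *     ret = rte_eth_rx_queue_setup(port_id, 0, 128, rte_socket_id(),
 *                                  &rx_conf, mb_pool);
 *     ret = rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *                                  &tx_conf);
 *     ret = rte_eth_dev_start(port_id);
 */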
345
346 static void
347 rte_eth_dev_config_restore(uint8_t port_id)
348 {
349         struct rte_eth_dev *dev;
350         struct rte_eth_dev_info dev_info;
351         struct ether_addr addr;
352         uint16_t i;
353
354         dev = &rte_eth_devices[port_id];
355
356         rte_eth_dev_info_get(port_id, &dev_info);
357
358         /* replay MAC address configuration */
359         for (i = 0; i < dev_info.max_mac_addrs; i++) {
360                 addr = dev->data->mac_addrs[i];
361
362                 /* skip zero address */
363                 if (is_zero_ether_addr(&addr))
364                         continue;
365
366                 /* add address to the hardware */
367                 if  (*dev->dev_ops->mac_addr_add)
368                         (*dev->dev_ops->mac_addr_add)(dev, &addr, i, 0);
369                 else {
370                         PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
371                                         port_id);
372                         /* exit the loop but do not return an error */
373                         break;
374                 }
375         }
376
377         /* replay promiscuous configuration */
378         if (rte_eth_promiscuous_get(port_id) == 1)
379                 rte_eth_promiscuous_enable(port_id);
380         else if (rte_eth_promiscuous_get(port_id) == 0)
381                 rte_eth_promiscuous_disable(port_id);
382
383         /* replay allmulticast configuration */
384         if (rte_eth_allmulticast_get(port_id) == 1)
385                 rte_eth_allmulticast_enable(port_id);
386         else if (rte_eth_allmulticast_get(port_id) == 0)
387                 rte_eth_allmulticast_disable(port_id);
388 }
389
390 int
391 rte_eth_dev_start(uint8_t port_id)
392 {
393         struct rte_eth_dev *dev;
394         int diag;
395
396         /* This function is only safe when called from the primary process
397          * in a multi-process setup*/
398         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
399
400         if (port_id >= nb_ports) {
401                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
402                 return (-EINVAL);
403         }
404         dev = &rte_eth_devices[port_id];
405
406         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
407         diag = (*dev->dev_ops->dev_start)(dev);
408         if (diag == 0)
409                 dev->data->dev_started = 1;
410         else
411                 return diag;
412
413         rte_eth_dev_config_restore(port_id);
414
415         return 0;
416 }
417
418 void
419 rte_eth_dev_stop(uint8_t port_id)
420 {
421         struct rte_eth_dev *dev;
422
423         /* This function is only safe when called from the primary process
424          * in a multi-process setup*/
425         PROC_PRIMARY_OR_RET();
426
427         if (port_id >= nb_ports) {
428                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
429                 return;
430         }
431         dev = &rte_eth_devices[port_id];
432
433         FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
434         dev->data->dev_started = 0;
435         (*dev->dev_ops->dev_stop)(dev);
436 }
437
438 void
439 rte_eth_dev_close(uint8_t port_id)
440 {
441         struct rte_eth_dev *dev;
442
443         /* This function is only safe when called from the primary process
444          * in a multi-process setup*/
445         PROC_PRIMARY_OR_RET();
446
447         if (port_id >= nb_ports) {
448                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
449                 return;
450         }
451
452         dev = &rte_eth_devices[port_id];
453
454         FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
455         dev->data->dev_started = 0;
456         (*dev->dev_ops->dev_close)(dev);
457 }
458
459 int
460 rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
461                        uint16_t nb_rx_desc, unsigned int socket_id,
462                        const struct rte_eth_rxconf *rx_conf,
463                        struct rte_mempool *mp)
464 {
465         struct rte_eth_dev *dev;
466         struct rte_pktmbuf_pool_private *mbp_priv;
467         struct rte_eth_dev_info dev_info;
468
469         /* This function is only safe when called from the primary process
470          * in a multi-process setup*/
471         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
472
473         if (port_id >= nb_ports) {
474                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
475                 return (-EINVAL);
476         }
477         dev = &rte_eth_devices[port_id];
478         if (rx_queue_id >= dev->data->nb_rx_queues) {
479                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
480                 return (-EINVAL);
481         }
482
483         if (dev->data->dev_started) {
484                 PMD_DEBUG_TRACE(
485                     "port %d must be stopped to allow configuration\n", port_id);
486                 return -EBUSY;
487         }
488
489         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
490         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
491
492         /*
493          * Check the size of the mbuf data buffer.
494          * This value must be provided in the private data of the memory pool.
495          * First check that the memory pool has valid private data.
496          */
497         (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
498         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
499                 PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
500                                 mp->name, (int) mp->private_data_size,
501                                 (int) sizeof(struct rte_pktmbuf_pool_private));
502                 return (-ENOSPC);
503         }
504         mbp_priv = (struct rte_pktmbuf_pool_private *)
505                 ((char *)mp + sizeof(struct rte_mempool));
506         if ((uint32_t) (mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM) <
507             dev_info.min_rx_bufsize) {
508                 PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
509                                 "(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
510                                 "=%d)\n",
511                                 mp->name,
512                                 (int)mbp_priv->mbuf_data_room_size,
513                                 (int)(RTE_PKTMBUF_HEADROOM +
514                                       dev_info.min_rx_bufsize),
515                                 (int)RTE_PKTMBUF_HEADROOM,
516                                 (int)dev_info.min_rx_bufsize);
517                 return (-EINVAL);
518         }
519
520         return (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
521                                                socket_id, rx_conf, mp);
522 }
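/*
 * Illustrative sketch (not part of the library): creating an mbuf pool that
 * satisfies the checks above. rte_pktmbuf_pool_init() fills the pool's
 * private data (including mbuf_data_room_size), and the data room must
 * cover RTE_PKTMBUF_HEADROOM plus the device's min_rx_bufsize. The sizes
 * below (NB_MBUF, a 2048-byte data room, cache size 32) are arbitrary
 * examples.
 *
 *     #define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
 *
 *     mb_pool = rte_mempool_create("rx_mbuf_pool", NB_MBUF, MBUF_SIZE, 32,
 *                                  sizeof(struct rte_pktmbuf_pool_private),
 *                                  rte_pktmbuf_pool_init, NULL,
 *                                  rte_pktmbuf_init, NULL,
 *                                  rte_socket_id(), 0);
 */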
523
524 int
525 rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
526                        uint16_t nb_tx_desc, unsigned int socket_id,
527                        const struct rte_eth_txconf *tx_conf)
528 {
529         struct rte_eth_dev *dev;
530
531         /* This function is only safe when called from the primary process
532          * in a multi-process setup*/
533         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
534
535         if (port_id >= nb_ports) {
536                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
537                 return (-EINVAL);
538         }
539         dev = &rte_eth_devices[port_id];
540         if (tx_queue_id >= dev->data->nb_tx_queues) {
541                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
542                 return (-EINVAL);
543         }
544
545         if (dev->data->dev_started) {
546                 PMD_DEBUG_TRACE(
547                     "port %d must be stopped to allow configuration\n", port_id);
548                 return -EBUSY;
549         }
550
551         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
552         return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
553                                                socket_id, tx_conf);
554 }
555
556 void
557 rte_eth_promiscuous_enable(uint8_t port_id)
558 {
559         struct rte_eth_dev *dev;
560
561         if (port_id >= nb_ports) {
562                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
563                 return;
564         }
565         dev = &rte_eth_devices[port_id];
566
567         FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
568         (*dev->dev_ops->promiscuous_enable)(dev);
569         dev->data->promiscuous = 1;
570 }
571
572 void
573 rte_eth_promiscuous_disable(uint8_t port_id)
574 {
575         struct rte_eth_dev *dev;
576
577         if (port_id >= nb_ports) {
578                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
579                 return;
580         }
581         dev = &rte_eth_devices[port_id];
582
583         FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
584         dev->data->promiscuous = 0;
585         (*dev->dev_ops->promiscuous_disable)(dev);
586 }
587
588 int
589 rte_eth_promiscuous_get(uint8_t port_id)
590 {
591         struct rte_eth_dev *dev;
592
593         if (port_id >= nb_ports) {
594                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
595                 return -1;
596         }
597
598         dev = &rte_eth_devices[port_id];
599         return dev->data->promiscuous;
600 }
601
602 void
603 rte_eth_allmulticast_enable(uint8_t port_id)
604 {
605         struct rte_eth_dev *dev;
606
607         if (port_id >= nb_ports) {
608                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
609                 return;
610         }
611         dev = &rte_eth_devices[port_id];
612
613         FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
614         (*dev->dev_ops->allmulticast_enable)(dev);
615         dev->data->all_multicast = 1;
616 }
617
618 void
619 rte_eth_allmulticast_disable(uint8_t port_id)
620 {
621         struct rte_eth_dev *dev;
622
623         if (port_id >= nb_ports) {
624                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
625                 return;
626         }
627         dev = &rte_eth_devices[port_id];
628
629         FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
630         dev->data->all_multicast = 0;
631         (*dev->dev_ops->allmulticast_disable)(dev);
632 }
633
634 int
635 rte_eth_allmulticast_get(uint8_t port_id)
636 {
637         struct rte_eth_dev *dev;
638
639         if (port_id >= nb_ports) {
640                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
641                 return -1;
642         }
643
644         dev = &rte_eth_devices[port_id];
645         return dev->data->all_multicast;
646 }
647
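/*
 * Copy dev->data->dev_link into *link as a single atomic 64-bit operation
 * (via rte_atomic64_cmpset), since the link status may be updated
 * concurrently by the link-state-change interrupt handler.
 */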
648 static inline int
649 rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
650                                 struct rte_eth_link *link)
651 {
652         struct rte_eth_link *dst = link;
653         struct rte_eth_link *src = &(dev->data->dev_link);
654
655         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
656                                         *(uint64_t *)src) == 0)
657                 return -1;
658
659         return 0;
660 }
661
662 void
663 rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
664 {
665         struct rte_eth_dev *dev;
666
667         if (port_id >= nb_ports) {
668                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
669                 return;
670         }
671         dev = &rte_eth_devices[port_id];
672         FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
673
674         if (dev->data->dev_conf.intr_conf.lsc != 0)
675                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
676         else {
677                 (*dev->dev_ops->link_update)(dev, 1);
678                 *eth_link = dev->data->dev_link;
679         }
680 }
681
682 void
683 rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
684 {
685         struct rte_eth_dev *dev;
686
687         if (port_id >= nb_ports) {
688                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
689                 return;
690         }
691         dev = &rte_eth_devices[port_id];
692         FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
693
694         if (dev->data->dev_conf.intr_conf.lsc != 0)
695                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
696         else {
697                 (*dev->dev_ops->link_update)(dev, 0);
698                 *eth_link = dev->data->dev_link;
699         }
700 }
701
702 void
703 rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
704 {
705         struct rte_eth_dev *dev;
706
707         if (port_id >= nb_ports) {
708                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
709                 return;
710         }
711         dev = &rte_eth_devices[port_id];
712
713         FUNC_PTR_OR_RET(*dev->dev_ops->stats_get);
714         (*dev->dev_ops->stats_get)(dev, stats);
715         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
716 }
717
718 void
719 rte_eth_stats_reset(uint8_t port_id)
720 {
721         struct rte_eth_dev *dev;
722
723         if (port_id >= nb_ports) {
724                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
725                 return;
726         }
727         dev = &rte_eth_devices[port_id];
728
729         FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
730         (*dev->dev_ops->stats_reset)(dev);
731 }
732
733 void
734 rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
735 {
736         struct rte_eth_dev *dev;
737
738         if (port_id >= nb_ports) {
739                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
740                 return;
741         }
742         dev = &rte_eth_devices[port_id];
743
744         FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
745         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
746         dev_info->pci_dev = dev->pci_dev;
747         dev_info->driver_name = dev->driver->pci_drv.name;
748 }
749
750 void
751 rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
752 {
753         struct rte_eth_dev *dev;
754
755         if (port_id >= nb_ports) {
756                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
757                 return;
758         }
759         dev = &rte_eth_devices[port_id];
760         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
761 }
762
763 int
764 rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
765 {
766         struct rte_eth_dev *dev;
767
768         if (port_id >= nb_ports) {
769                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
770                 return (-ENODEV);
771         }
772         dev = &rte_eth_devices[port_id];
773         if (! (dev->data->dev_conf.rxmode.hw_vlan_filter)) {
774                 PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
775                 return (-ENOSYS);
776         }
777
778         if (vlan_id > 4095) {
779                 PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
780                                 port_id, (unsigned) vlan_id);
781                 return (-EINVAL);
782         }
783         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
784         (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
785         return (0);
786 }
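/*
 * Illustrative sketch (not part of the library): VLAN filtering must be
 * enabled in the port configuration before individual VLAN ids can be
 * added. port_conf is the rte_eth_conf passed to rte_eth_dev_configure()
 * and VLAN id 100 is an arbitrary example.
 *
 *     port_conf.rxmode.hw_vlan_filter = 1;
 *     ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
 *     ...
 *     ret = rte_eth_dev_vlan_filter(port_id, 100, 1);   // accept VLAN 100
 *     ret = rte_eth_dev_vlan_filter(port_id, 100, 0);   // later, remove it
 */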
787
788 int
789 rte_eth_dev_fdir_add_signature_filter(uint8_t port_id,
790                                       struct rte_fdir_filter *fdir_filter,
791                                       uint8_t queue)
792 {
793         struct rte_eth_dev *dev;
794
795         if (port_id >= nb_ports) {
796                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
797                 return (-ENODEV);
798         }
799
800         dev = &rte_eth_devices[port_id];
801
802         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
803                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
804                                 port_id, dev->data->dev_conf.fdir_conf.mode);
805                 return (-ENOSYS);
806         }
807
808         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
809              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
810             && (fdir_filter->port_src || fdir_filter->port_dst)) {
811                 PMD_DEBUG_TRACE("Ports are meaningless for SCTP and "
812                                 "NONE l4type; source & destination ports "
813                                 "should be null!\n");
814                 return (-EINVAL);
815         }
816
817         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_signature_filter, -ENOTSUP);
818         return (*dev->dev_ops->fdir_add_signature_filter)(dev, fdir_filter,
819                                                                 queue);
820 }
821
822 int
823 rte_eth_dev_fdir_update_signature_filter(uint8_t port_id,
824                                          struct rte_fdir_filter *fdir_filter,
825                                          uint8_t queue)
826 {
827         struct rte_eth_dev *dev;
828
829         if (port_id >= nb_ports) {
830                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
831                 return (-ENODEV);
832         }
833
834         dev = &rte_eth_devices[port_id];
835
836         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
837                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
838                                 port_id, dev->data->dev_conf.fdir_conf.mode);
839                 return (-ENOSYS);
840         }
841
842         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
843              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
844             && (fdir_filter->port_src || fdir_filter->port_dst)) {
845                 PMD_DEBUG_TRACE("Ports are meaningless for SCTP and "
846                                 "NONE l4type; source & destination ports "
847                                 "should be null!\n");
848                 return (-EINVAL);
849         }
850
851         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_signature_filter, -ENOTSUP);
852         return (*dev->dev_ops->fdir_update_signature_filter)(dev, fdir_filter,
853                                                                 queue);
854
855 }
856
857 int
858 rte_eth_dev_fdir_remove_signature_filter(uint8_t port_id,
859                                          struct rte_fdir_filter *fdir_filter)
860 {
861         struct rte_eth_dev *dev;
862
863         if (port_id >= nb_ports) {
864                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
865                 return (-ENODEV);
866         }
867
868         dev = &rte_eth_devices[port_id];
869
870         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
871                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
872                                 port_id, dev->data->dev_conf.fdir_conf.mode);
873                 return (-ENOSYS);
874         }
875
876         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
877              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
878             && (fdir_filter->port_src || fdir_filter->port_dst)) {
879                 PMD_DEBUG_TRACE("Ports are meaningless for SCTP and "
880                                 "NONE l4type; source & destination ports "
881                                 "should be null!\n");
882                 return (-EINVAL);
883         }
884
885         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_signature_filter, -ENOTSUP);
886         return (*dev->dev_ops->fdir_remove_signature_filter)(dev, fdir_filter);
887 }
888
889 int
890 rte_eth_dev_fdir_get_infos(uint8_t port_id, struct rte_eth_fdir *fdir)
891 {
892         struct rte_eth_dev *dev;
893
894         if (port_id >= nb_ports) {
895                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
896                 return (-ENODEV);
897         }
898
899         dev = &rte_eth_devices[port_id];
900         if (! (dev->data->dev_conf.fdir_conf.mode)) {
901                 PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
902                 return (-ENOSYS);
903         }
904
905         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_infos_get, -ENOTSUP);
906
907         (*dev->dev_ops->fdir_infos_get)(dev, fdir);
908         return (0);
909 }
910
911 int
912 rte_eth_dev_fdir_add_perfect_filter(uint8_t port_id,
913                                     struct rte_fdir_filter *fdir_filter,
914                                     uint16_t soft_id, uint8_t queue,
915                                     uint8_t drop)
916 {
917         struct rte_eth_dev *dev;
918
919         if (port_id >= nb_ports) {
920                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
921                 return (-ENODEV);
922         }
923
924         dev = &rte_eth_devices[port_id];
925
926         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
927                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
928                                 port_id, dev->data->dev_conf.fdir_conf.mode);
929                 return (-ENOSYS);
930         }
931
932         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
933              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
934             && (fdir_filter->port_src || fdir_filter->port_dst)) {
935                 PMD_DEBUG_TRACE("Ports are meaningless for SCTP and "
936                                 "NONE l4type; source & destination ports "
937                                 "should be null!\n");
938                 return (-EINVAL);
939         }
940
941         /* For now IPv6 is not supported with perfect filter */
942         if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
943                 return (-ENOTSUP);
944
945         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_perfect_filter, -ENOTSUP);
946         return (*dev->dev_ops->fdir_add_perfect_filter)(dev, fdir_filter,
947                                                                 soft_id, queue,
948                                                                 drop);
949 }
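/*
 * Illustrative sketch (not part of the library): adding a perfect filter.
 * Flow Director must have been configured with
 * fdir_conf.mode = RTE_FDIR_MODE_PERFECT before the port was started.
 * Only the rte_fdir_filter fields referenced in this file are shown, and
 * the IPv4/UDP type values and port byte order are assumptions to be
 * checked against rte_ethdev.h and the PMD in use.
 *
 *     struct rte_fdir_filter filter;
 *
 *     memset(&filter, 0, sizeof(filter));
 *     filter.iptype = RTE_FDIR_IPTYPE_IPV4;     // IPv6 not supported here
 *     filter.l4type = RTE_FDIR_L4TYPE_UDP;
 *     filter.port_dst = 53;                     // match destination port 53
 *
 *     ret = rte_eth_dev_fdir_add_perfect_filter(port_id, &filter,
 *                                               1,        // soft_id
 *                                               rx_queue, // target RX queue
 *                                               0);       // 0: do not drop
 */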
950
951 int
952 rte_eth_dev_fdir_update_perfect_filter(uint8_t port_id,
953                                        struct rte_fdir_filter *fdir_filter,
954                                        uint16_t soft_id, uint8_t queue,
955                                        uint8_t drop)
956 {
957         struct rte_eth_dev *dev;
958
959         if (port_id >= nb_ports) {
960                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
961                 return (-ENODEV);
962         }
963
964         dev = &rte_eth_devices[port_id];
965
966         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
967                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
968                                 port_id, dev->data->dev_conf.fdir_conf.mode);
969                 return (-ENOSYS);
970         }
971
972         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
973              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
974             && (fdir_filter->port_src || fdir_filter->port_dst)) {
975                 PMD_DEBUG_TRACE("Ports are meaningless for SCTP and "
976                                 "NONE l4type; source & destination ports "
977                                 "should be null!\n");
978                 return (-EINVAL);
979         }
980
981         /* For now IPv6 is not supported with perfect filter */
982         if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
983                 return (-ENOTSUP);
984
985         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_perfect_filter, -ENOTSUP);
986         return (*dev->dev_ops->fdir_update_perfect_filter)(dev, fdir_filter,
987                                                         soft_id, queue, drop);
988 }
989
990 int
991 rte_eth_dev_fdir_remove_perfect_filter(uint8_t port_id,
992                                        struct rte_fdir_filter *fdir_filter,
993                                        uint16_t soft_id)
994 {
995         struct rte_eth_dev *dev;
996
997         if (port_id >= nb_ports) {
998                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
999                 return (-ENODEV);
1000         }
1001
1002         dev = &rte_eth_devices[port_id];
1003
1004         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
1005                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1006                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1007                 return (-ENOSYS);
1008         }
1009
1010         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1011              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1012             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1013                 PMD_DEBUG_TRACE("Ports are meaningless for SCTP and "
1014                                 "NONE l4type; source & destination ports "
1015                                 "should be null!\n");
1016                 return (-EINVAL);
1017         }
1018
1019         /* For now IPv6 is not supported with perfect filter */
1020         if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
1021                 return (-ENOTSUP);
1022
1023         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_perfect_filter, -ENOTSUP);
1024         return (*dev->dev_ops->fdir_remove_perfect_filter)(dev, fdir_filter,
1025                                                                 soft_id);
1026 }
1027
1028 int
1029 rte_eth_dev_fdir_set_masks(uint8_t port_id, struct rte_fdir_masks *fdir_mask)
1030 {
1031         struct rte_eth_dev *dev;
1032
1033         if (port_id >= nb_ports) {
1034                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1035                 return (-ENODEV);
1036         }
1037
1038         dev = &rte_eth_devices[port_id];
1039         if (! (dev->data->dev_conf.fdir_conf.mode)) {
1040                 PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
1041                 return (-ENOSYS);
1042         }
1043
1044         /* IPv6 masks are not supported */
1045         if (fdir_mask->src_ipv6_mask)
1046                 return (-ENOTSUP);
1047
1048         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_set_masks, -ENOTSUP);
1049         return (*dev->dev_ops->fdir_set_masks)(dev, fdir_mask);
1050 }
1051
1052 int
1053 rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
1054 {
1055         struct rte_eth_dev *dev;
1056
1057         if (port_id >= nb_ports) {
1058                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1059                 return (-ENODEV);
1060         }
1061
1062         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
1063                 PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
1064                 return (-EINVAL);
1065         }
1066
1067         dev = &rte_eth_devices[port_id];
1068
1069         /* High water, low water validation are device specific */
1070         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
1071         return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
1075 }
1076
1077 int
1078 rte_eth_led_on(uint8_t port_id)
1079 {
1080         struct rte_eth_dev *dev;
1081
1082         if (port_id >= nb_ports) {
1083                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1084                 return (-ENODEV);
1085         }
1086
1087         dev = &rte_eth_devices[port_id];
1088         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
1089         return ((*dev->dev_ops->dev_led_on)(dev));
1090 }
1091
1092 int
1093 rte_eth_led_off(uint8_t port_id)
1094 {
1095         struct rte_eth_dev *dev;
1096
1097         if (port_id >= nb_ports) {
1098                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1099                 return (-ENODEV);
1100         }
1101
1102         dev = &rte_eth_devices[port_id];
1103         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
1104         return ((*dev->dev_ops->dev_led_off)(dev));
1105 }
1106
1107 /*
1108  * Returns the index of addr in the port's MAC address array, or -1 if it is
1109  * not found. Pass 00:00:00:00:00:00 to find an empty spot.
1110  */
1111 static inline int
1112 get_mac_addr_index(uint8_t port_id, struct ether_addr *addr)
1113 {
1114         struct rte_eth_dev_info dev_info;
1115         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1116         unsigned i;
1117
1118         rte_eth_dev_info_get(port_id, &dev_info);
1119
1120         for (i = 0; i < dev_info.max_mac_addrs; i++)
1121                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
1122                         return i;
1123
1124         return -1;
1125 }
1126
1127 static struct ether_addr null_mac_addr = {{0, 0, 0, 0, 0, 0}};
1128
1129 int
1130 rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
1131                 uint32_t pool)
1132 {
1133         struct rte_eth_dev *dev;
1134         int index;
1135
1136         if (port_id >= nb_ports) {
1137                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1138                 return (-ENODEV);
1139         }
1140         dev = &rte_eth_devices[port_id];
1141         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
1142
1143         if (is_zero_ether_addr(addr)) {
1144                 PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n", port_id);
1145                 return (-EINVAL);
1146         }
1147
1148         /* Check if it's already there, and do nothing */
1149         index = get_mac_addr_index(port_id, addr);
1150         if (index >= 0)
1151                 return 0;
1152
1153         index = get_mac_addr_index(port_id, &null_mac_addr);
1154         if (index < 0) {
1155                 PMD_DEBUG_TRACE("port %d: MAC address array full\n", port_id);
1156                 return (-ENOSPC);
1157         }
1158
1159         /* Update NIC */
1160         (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
1161
1162         /* Update address in NIC data structure */
1163         ether_addr_copy(addr, &dev->data->mac_addrs[index]);
1164
1165         return 0;
1166 }
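/*
 * Illustrative sketch (not part of the library): adding and later removing
 * a secondary unicast MAC address on a port. The address bytes are an
 * arbitrary example; pool index 0 is used.
 *
 *     struct ether_addr mac = {{0x00, 0x11, 0x22, 0x33, 0x44, 0x55}};
 *
 *     ret = rte_eth_dev_mac_addr_add(port_id, &mac, 0);
 *     ...
 *     ret = rte_eth_dev_mac_addr_remove(port_id, &mac);
 */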
1167
1168 int
1169 rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
1170 {
1171         struct rte_eth_dev *dev;
1172         int index;
1173
1174         if (port_id >= nb_ports) {
1175                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1176                 return (-ENODEV);
1177         }
1178         dev = &rte_eth_devices[port_id];
1179         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
1180
1181         index = get_mac_addr_index(port_id, addr);
1182         if (index == 0) {
1183                 PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
1184                 return (-EADDRINUSE);
1185         } else if (index < 0)
1186                 return 0;  /* Do nothing if address wasn't found */
1187
1188         /* Update NIC */
1189         (*dev->dev_ops->mac_addr_remove)(dev, index);
1190
1191         /* Update address in NIC data structure */
1192         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
1193
1194         return 0;
1195 }
1196
1197 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
1198 uint16_t
1199 rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
1200                  struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1201 {
1202         struct rte_eth_dev *dev;
1203
1204         if (port_id >= nb_ports) {
1205                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1206                 return 0;
1207         }
1208         dev = &rte_eth_devices[port_id];
1209         FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, -ENOTSUP);
1210         if (queue_id >= dev->data->nb_rx_queues) {
1211                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
1212                 return 0;
1213         }
1214         return (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
1215                                                 rx_pkts, nb_pkts);
1216 }
1217
1218 uint16_t
1219 rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
1220                  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1221 {
1222         struct rte_eth_dev *dev;
1223
1224         if (port_id >= nb_ports) {
1225                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1226                 return 0;
1227         }
1228         dev = &rte_eth_devices[port_id];
1229
1230         FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, -ENOTSUP);
1231         if (queue_id >= dev->data->nb_tx_queues) {
1232                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
1233                 return 0;
1234         }
1235         return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id],
1236                                                 tx_pkts, nb_pkts);
1237 }
1238 #endif
1239
1240 int
1241 rte_eth_dev_callback_register(uint8_t port_id,
1242                         enum rte_eth_event_type event,
1243                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
1244 {
1245         int ret = -1;
1246         struct rte_eth_dev *dev;
1247         struct rte_eth_dev_callback *user_cb = NULL;
1248
1249         if (!cb_fn)
1250                 return -1;
1251         if (port_id >= nb_ports) {
1252                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1253                 return -1;
1254         }
1255         dev = &rte_eth_devices[port_id];
1256         rte_spinlock_lock(&rte_eth_dev_cb_lock);
1257         TAILQ_FOREACH(user_cb, &(dev->callbacks), next) {
1258                 if (user_cb->cb_fn == cb_fn &&
1259                         user_cb->cb_arg == cb_arg &&
1260                         user_cb->event == event) {
1261                         ret = 0;
1262                         goto out;
1263                 }
1264         }
1265         user_cb = rte_malloc("INTR_USER_CALLBACK",
1266                 sizeof(struct rte_eth_dev_callback), 0);
1267         if (!user_cb)
1268                 goto out;
1269         user_cb->cb_fn = cb_fn;
1270         user_cb->cb_arg = cb_arg;
1271         user_cb->event = event;
1272         TAILQ_INSERT_TAIL(&(dev->callbacks), user_cb, next);
1273         ret = 0;
1274
1275 out:
1276         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
1277
1278         return ret;
1279 }
1280
1281 int
1282 rte_eth_dev_callback_unregister(uint8_t port_id,
1283                         enum rte_eth_event_type event,
1284                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
1285 {
1286         int ret = -1;
1287         struct rte_eth_dev *dev;
1288         struct rte_eth_dev_callback *cb_lst = NULL;
1289
1290         if (!cb_fn)
1291                 return -1;
1292         if (port_id >= nb_ports) {
1293                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1294                 return -1;
1295         }
1296         dev = &rte_eth_devices[port_id];
1297         rte_spinlock_lock(&rte_eth_dev_cb_lock);
1298         TAILQ_FOREACH(cb_lst, &(dev->callbacks), next) {
1299                 if (cb_lst->cb_fn != cb_fn || cb_lst->event != event)
1300                         continue;
1301                 if (cb_lst->cb_arg == (void *)-1 ||
1302                                 cb_lst->cb_arg == cb_arg) {
1303                         TAILQ_REMOVE(&(dev->callbacks), cb_lst, next);
1304                         rte_free(cb_lst);
1305                         ret = 0;
1306                 }
1307         }
1308
1309         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
1310
1311         return ret;
1312 }
1313
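/*
 * Invoke all callbacks registered for the given event. A copy of each
 * callback entry is taken and rte_eth_dev_cb_lock is released around the
 * actual cb_fn() invocation, so a callback may itself register or
 * unregister callbacks without deadlocking.
 */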
1314 void
1315 _rte_eth_dev_callback_process(struct rte_eth_dev *dev, enum rte_eth_event_type event)
1316 {
1317         struct rte_eth_dev_callback *cb_lst = NULL;
1318         struct rte_eth_dev_callback dev_cb;
1319
1320         rte_spinlock_lock(&rte_eth_dev_cb_lock);
1321         TAILQ_FOREACH(cb_lst, &(dev->callbacks), next) {
1322                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
1323                         continue;
1324                 dev_cb = *cb_lst;
1325                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
1326                 dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
1327                                                 dev_cb.cb_arg);
1328                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
1329         }
1330         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
1331 }
1332