/* dpdk.git: lib/librte_ether/rte_ethdev.c @ 2e85ab7a0fac7138f917a226f38e41f88503712e */
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>

#include "rte_ether.h"
#include "rte_ethdev.h"

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
#define PMD_DEBUG_TRACE(fmt, args...) do {                        \
                RTE_LOG(ERR, PMD, "%s: " fmt, __func__, ## args); \
        } while (0)
#else
#define PMD_DEBUG_TRACE(fmt, args...)
#endif

/* Macros to restrict control functions to the primary process only */
#define PROC_PRIMARY_OR_ERR_RET(retval) do { \
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
                PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
                return (retval); \
        } \
} while (0)
#define PROC_PRIMARY_OR_RET() do { \
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
                PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
                return; \
        } \
} while (0)
/* Macros to check for invalid function pointers in the dev_ops structure */
#define FUNC_PTR_OR_ERR_RET(func, retval) do { \
        if ((func) == NULL) { \
                PMD_DEBUG_TRACE("Function not supported\n"); \
                return (retval); \
        } \
} while (0)
#define FUNC_PTR_OR_RET(func) do { \
        if ((func) == NULL) { \
                PMD_DEBUG_TRACE("Function not supported\n"); \
                return; \
        } \
} while (0)

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
static struct rte_eth_dev_data *rte_eth_dev_data = NULL;
static uint8_t nb_ports = 0;

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/**
 * The user application callback description.
 *
 * It contains the callback address to be registered by the user
 * application, a pointer to the callback's parameters, and the event type.
 */
struct rte_eth_dev_callback {
        TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
        rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
        void *cb_arg;                           /**< Parameter for callback */
        enum rte_eth_event_type event;          /**< Interrupt event type */
        uint32_t active;                        /**< Callback is executing */
};
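
/*
 * Illustrative usage sketch (not part of the original file): a callback
 * matching the rte_eth_dev_cb_fn signature, reacting to link state change
 * (LSC) interrupts. The RTE_ETHDEV_USAGE_EXAMPLES guard is hypothetical
 * and keeps the sketch out of any real build.
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static void
example_lsc_callback(uint8_t port_id, enum rte_eth_event_type event,
                     void *cb_arg)
{
        struct rte_eth_link link;

        (void)cb_arg;
        if (event != RTE_ETH_EVENT_INTR_LSC)
                return;
        /* nowait variant: do not block inside an interrupt callback */
        rte_eth_link_get_nowait(port_id, &link);
        RTE_LOG(INFO, PMD, "port %u link %s\n", (unsigned)port_id,
                link.link_status ? "up" : "down");
}
#endif /* RTE_ETHDEV_USAGE_EXAMPLES */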

enum {
        STAT_QMAP_TX = 0,
        STAT_QMAP_RX
};

static inline void
rte_eth_dev_data_alloc(void)
{
        const unsigned flags = 0;
        const struct rte_memzone *mz;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
                                RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
                                rte_socket_id(), flags);
        } else
                mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
        if (mz == NULL)
                rte_panic("Cannot allocate memzone for ethernet port data\n");

        rte_eth_dev_data = mz->addr;
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                memset(rte_eth_dev_data, 0,
                                RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
}

static inline struct rte_eth_dev *
rte_eth_dev_allocate(void)
{
        struct rte_eth_dev *eth_dev;

        if (nb_ports == RTE_MAX_ETHPORTS) {
                PMD_DEBUG_TRACE("Reached maximum number of ethernet ports\n");
                return NULL;
        }

        if (rte_eth_dev_data == NULL)
                rte_eth_dev_data_alloc();

        eth_dev = &rte_eth_devices[nb_ports];
        eth_dev->data = &rte_eth_dev_data[nb_ports];
        eth_dev->data->port_id = nb_ports++;
        return eth_dev;
}

static int
rte_eth_dev_init(struct rte_pci_driver *pci_drv,
                 struct rte_pci_device *pci_dev)
{
        struct eth_driver *eth_drv;
        struct rte_eth_dev *eth_dev;
        int diag;

        eth_drv = (struct eth_driver *)pci_drv;

        eth_dev = rte_eth_dev_allocate();
        if (eth_dev == NULL)
                return -ENOMEM;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
                                  eth_drv->dev_private_size,
                                  CACHE_LINE_SIZE);
                if (eth_dev->data->dev_private == NULL)
                        rte_panic("Cannot allocate memory for private port data\n");
        }
        eth_dev->pci_dev = pci_dev;
        eth_dev->driver = eth_drv;
        eth_dev->data->rx_mbuf_alloc_failed = 0;

        /* init user callbacks */
        TAILQ_INIT(&(eth_dev->callbacks));

        /* Set the default maximum frame size. */
        eth_dev->data->max_frame_size = ETHER_MAX_LEN;

        /* Invoke PMD device initialization function */
        diag = (*eth_drv->eth_dev_init)(eth_drv, eth_dev);
        if (diag == 0)
                return (0);

        PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%x device_id=0x%x)"
                        " failed\n", pci_drv->name,
                        (unsigned) pci_dev->id.vendor_id,
                        (unsigned) pci_dev->id.device_id);
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                rte_free(eth_dev->data->dev_private);
        nb_ports--;
        return diag;
}

/**
 * Register an Ethernet [Poll Mode] driver.
 *
 * Function invoked by the initialization function of an Ethernet driver
 * to simultaneously register itself as a PCI driver and as an Ethernet
 * Poll Mode Driver.
 * Invokes the rte_eal_pci_register() function to register the *pci_drv*
 * structure embedded in the *eth_drv* structure, after having stored the
 * address of the rte_eth_dev_init() function in the *devinit* field of
 * the *pci_drv* structure.
 * During the PCI probing phase, the rte_eth_dev_init() function is
 * invoked for each PCI [Ethernet device] matching the embedded PCI
 * identifiers provided by the driver.
 */
void
rte_eth_driver_register(struct eth_driver *eth_drv)
{
        eth_drv->pci_drv.devinit = rte_eth_dev_init;
        rte_eal_pci_register(&eth_drv->pci_drv);
}
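
/*
 * Illustrative sketch (not part of the original file): how a PMD's init
 * code typically uses rte_eth_driver_register(). The pci_drv, eth_dev_init
 * and dev_private_size members appear above; the driver name, the commented
 * .id_table member and the example types are assumptions. Guarded by the
 * hypothetical RTE_ETHDEV_USAGE_EXAMPLES macro.
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static int
example_eth_dev_init(struct eth_driver *eth_drv, struct rte_eth_dev *eth_dev)
{
        (void)eth_drv;
        (void)eth_dev;
        /* a real PMD would set eth_dev->dev_ops, read the MAC address, ... */
        return 0;
}

static struct eth_driver example_pmd_driver = {
        .pci_drv = {
                .name = "example_pmd",
                /* .id_table = example_pci_id_map, (hypothetical) */
        },
        .eth_dev_init = example_eth_dev_init,
        .dev_private_size = 0, /* normally sizeof(struct example_adapter) */
};

static void
example_pmd_init(void)
{
        /* registers both the PCI driver and the ethdev PMD in one call */
        rte_eth_driver_register(&example_pmd_driver);
}
#endif /* RTE_ETHDEV_USAGE_EXAMPLES */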

int
rte_eth_dev_socket_id(uint8_t port_id)
{
        if (port_id >= nb_ports)
                return -1;
        return rte_eth_devices[port_id].pci_dev->numa_node;
}

uint8_t
rte_eth_dev_count(void)
{
        return (nb_ports);
}

static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_rx_queues;
        void **rxq;
        unsigned i;

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

        if (dev->data->rx_queues == NULL) {
                dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
                                sizeof(dev->data->rx_queues[0]) * nb_queues,
                                CACHE_LINE_SIZE);
                if (dev->data->rx_queues == NULL) {
                        dev->data->nb_rx_queues = 0;
                        return -(ENOMEM);
                }
        } else {
                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);
                rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
                                CACHE_LINE_SIZE);
                if (rxq == NULL)
                        return -(ENOMEM);

                if (nb_queues > old_nb_queues)
                        memset(rxq + old_nb_queues, 0,
                                sizeof(rxq[0]) * (nb_queues - old_nb_queues));

                dev->data->rx_queues = rxq;
        }
        dev->data->nb_rx_queues = nb_queues;
        return (0);
}

static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_tx_queues;
        void **txq;
        unsigned i;

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

        if (dev->data->tx_queues == NULL) {
                dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
                                sizeof(dev->data->tx_queues[0]) * nb_queues,
                                CACHE_LINE_SIZE);
                if (dev->data->tx_queues == NULL) {
                        dev->data->nb_tx_queues = 0;
                        return -(ENOMEM);
                }
        } else {
                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);
                txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
                                CACHE_LINE_SIZE);
                if (txq == NULL)
                        return -(ENOMEM);

                if (nb_queues > old_nb_queues)
                        memset(txq + old_nb_queues, 0,
                                sizeof(txq[0]) * (nb_queues - old_nb_queues));

                dev->data->tx_queues = txq;
        }
        dev->data->nb_tx_queues = nb_queues;
        return (0);
}

static int
rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                      const struct rte_eth_conf *dev_conf)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];

        if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
                /* check multi-queue mode */
                if ((dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ||
                    (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) ||
                    (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB_RSS) ||
                    (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB)) {
                        /* SRIOV only works in VMDq enabled mode */
                        PMD_DEBUG_TRACE("ethdev port_id=%d SRIOV active, "
                                        "wrong VMDQ mq_mode rx %d tx %d\n",
                                        port_id, dev_conf->rxmode.mq_mode,
                                        dev_conf->txmode.mq_mode);
                        return (-EINVAL);
                }

                switch (dev_conf->rxmode.mq_mode) {
                case ETH_MQ_RX_VMDQ_RSS:
                case ETH_MQ_RX_VMDQ_DCB:
                case ETH_MQ_RX_VMDQ_DCB_RSS:
                        /* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
                        PMD_DEBUG_TRACE("ethdev port_id=%d SRIOV active, "
                                        "unsupported VMDQ mq_mode rx %d\n",
                                        port_id, dev_conf->rxmode.mq_mode);
                        return (-EINVAL);
                default: /* ETH_MQ_RX_VMDQ_ONLY or ETH_MQ_RX_NONE */
                        /* if no mq mode is configured, use the default scheme */
                        dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
                        if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
                                RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
                        break;
                }

                switch (dev_conf->txmode.mq_mode) {
                case ETH_MQ_TX_VMDQ_DCB:
                        /* DCB VMDQ in SRIOV mode, not implemented yet */
                        PMD_DEBUG_TRACE("ethdev port_id=%d SRIOV active, "
                                        "unsupported VMDQ mq_mode tx %d\n",
                                        port_id, dev_conf->txmode.mq_mode);
                        return (-EINVAL);
                default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
                        /* if no mq mode is configured, use the default scheme */
                        dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
                        if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
                                RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
                        break;
                }

                /* check valid queue number */
                if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
                    (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
                        PMD_DEBUG_TRACE("ethdev port_id=%d SRIOV active, "
                                    "queue number must be less than or equal to %d\n",
                                        port_id, RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
                        return (-EINVAL);
                }
        } else {
                /* For VMDq+DCB mode check our configuration before we go further */
                if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
                        const struct rte_eth_vmdq_dcb_conf *conf;

                        if (nb_rx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_rx_q "
                                                "!= %d\n",
                                                port_id, ETH_VMDQ_DCB_NUM_QUEUES);
                                return (-EINVAL);
                        }
                        conf = &(dev_conf->rx_adv_conf.vmdq_dcb_conf);
                        if (!(conf->nb_queue_pools == ETH_16_POOLS ||
                              conf->nb_queue_pools == ETH_32_POOLS)) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
                                                "nb_queue_pools must be %d or %d\n",
                                                port_id, ETH_16_POOLS, ETH_32_POOLS);
                                return (-EINVAL);
                        }
                }
                if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
                        const struct rte_eth_vmdq_dcb_tx_conf *conf;

                        if (nb_tx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_tx_q "
                                                "!= %d\n",
                                                port_id, ETH_VMDQ_DCB_NUM_QUEUES);
                                return (-EINVAL);
                        }
                        conf = &(dev_conf->tx_adv_conf.vmdq_dcb_tx_conf);
                        if (!(conf->nb_queue_pools == ETH_16_POOLS ||
                              conf->nb_queue_pools == ETH_32_POOLS)) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
                                                "nb_queue_pools must be %d or %d\n",
                                                port_id, ETH_16_POOLS, ETH_32_POOLS);
                                return (-EINVAL);
                        }
                }

                /* For DCB mode check our configuration before we go further */
                if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
                        const struct rte_eth_dcb_rx_conf *conf;

                        if (nb_rx_q != ETH_DCB_NUM_QUEUES) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_rx_q "
                                                "!= %d\n",
                                                port_id, ETH_DCB_NUM_QUEUES);
                                return (-EINVAL);
                        }
                        conf = &(dev_conf->rx_adv_conf.dcb_rx_conf);
                        if (!(conf->nb_tcs == ETH_4_TCS ||
                              conf->nb_tcs == ETH_8_TCS)) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
                                                "nb_tcs must be %d or %d\n",
                                                port_id, ETH_4_TCS, ETH_8_TCS);
                                return (-EINVAL);
                        }
                }

                if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
                        const struct rte_eth_dcb_tx_conf *conf;

                        if (nb_tx_q != ETH_DCB_NUM_QUEUES) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_tx_q "
                                                "!= %d\n",
                                                port_id, ETH_DCB_NUM_QUEUES);
                                return (-EINVAL);
                        }
                        conf = &(dev_conf->tx_adv_conf.dcb_tx_conf);
                        if (!(conf->nb_tcs == ETH_4_TCS ||
                              conf->nb_tcs == ETH_8_TCS)) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
                                                "nb_tcs must be %d or %d\n",
                                                port_id, ETH_4_TCS, ETH_8_TCS);
                                return (-EINVAL);
                        }
                }
        }
        return 0;
}

int
rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                      const struct rte_eth_conf *dev_conf)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        int diag;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        if (port_id >= nb_ports || port_id >= RTE_MAX_ETHPORTS) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-EINVAL);
        }
        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

        if (dev->data->dev_started) {
                PMD_DEBUG_TRACE(
                    "port %d must be stopped to allow configuration\n", port_id);
                return (-EBUSY);
        }

        /*
         * Check that the numbers of RX and TX queues are not greater
         * than the maximum number of RX and TX queues supported by the
         * configured device.
         */
        (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
        if (nb_rx_q > dev_info.max_rx_queues) {
                PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
                                port_id, nb_rx_q, dev_info.max_rx_queues);
                return (-EINVAL);
        }
        if (nb_rx_q == 0) {
                PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_q == 0\n", port_id);
                return (-EINVAL);
        }

        if (nb_tx_q > dev_info.max_tx_queues) {
                PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
                                port_id, nb_tx_q, dev_info.max_tx_queues);
                return (-EINVAL);
        }
        if (nb_tx_q == 0) {
                PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_q == 0\n", port_id);
                return (-EINVAL);
        }

        /* Copy the dev_conf parameter into the dev structure */
        memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

        /*
         * If jumbo frames are enabled, check that the maximum RX packet
         * length is supported by the configured device.
         */
        if (dev_conf->rxmode.jumbo_frame == 1) {
                if (dev_conf->rxmode.max_rx_pkt_len >
                    dev_info.max_rx_pktlen) {
                        PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
                                " > max valid value %u\n",
                                port_id,
                                (unsigned)dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned)dev_info.max_rx_pktlen);
                        return (-EINVAL);
                }
                else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
                        PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
                                " < min valid value %u\n",
                                port_id,
                                (unsigned)dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned)ETHER_MIN_LEN);
                        return (-EINVAL);
                }
        } else
                /* Use default value */
                dev->data->dev_conf.rxmode.max_rx_pkt_len = ETHER_MAX_LEN;

        /* multiple queue mode checking */
        diag = rte_eth_dev_check_mq_mode(port_id, nb_rx_q, nb_tx_q, dev_conf);
        if (diag != 0) {
                PMD_DEBUG_TRACE("port%d rte_eth_dev_check_mq_mode = %d\n",
                                port_id, diag);
                return diag;
        }

        /*
         * Setup new number of RX/TX queues and reconfigure device.
         */
        diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
        if (diag != 0) {
                PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
                                port_id, diag);
                return diag;
        }

        diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
        if (diag != 0) {
                PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
                                port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                return diag;
        }

        diag = (*dev->dev_ops->dev_configure)(dev);
        if (diag != 0) {
                PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
                                port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                rte_eth_dev_tx_queue_config(dev, 0);
                return diag;
        }

        return 0;
}
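
/*
 * Illustrative sketch (not part of the original file): a minimal call to
 * rte_eth_dev_configure() with one RX and one TX queue. Only fields checked
 * above are set; everything left zero falls back to the defaults installed
 * here (e.g. max_rx_pkt_len becomes ETHER_MAX_LEN when jumbo frames are
 * off). Guarded by the hypothetical RTE_ETHDEV_USAGE_EXAMPLES macro.
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static int
example_configure_port(uint8_t port_id)
{
        struct rte_eth_conf conf;

        memset(&conf, 0, sizeof(conf));
        conf.rxmode.mq_mode = ETH_MQ_RX_NONE; /* no RSS/DCB/VMDq */
        conf.txmode.mq_mode = ETH_MQ_TX_NONE;
        conf.rxmode.jumbo_frame = 0;          /* default frame length */
        return rte_eth_dev_configure(port_id, 1, 1, &conf);
}
#endif /* RTE_ETHDEV_USAGE_EXAMPLES */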

static void
rte_eth_dev_config_restore(uint8_t port_id)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        struct ether_addr addr;
        uint16_t i;
        uint32_t pool = 0;

        dev = &rte_eth_devices[port_id];

        rte_eth_dev_info_get(port_id, &dev_info);

        if (RTE_ETH_DEV_SRIOV(dev).active)
                pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx;

        /* replay MAC address configuration */
        for (i = 0; i < dev_info.max_mac_addrs; i++) {
                addr = dev->data->mac_addrs[i];

                /* skip zero address */
                if (is_zero_ether_addr(&addr))
                        continue;

                /* add address to the hardware */
                if (*dev->dev_ops->mac_addr_add)
                        (*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool);
                else {
                        PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
                                        port_id);
                        /* exit the loop but do not return an error */
                        break;
                }
        }

        /* replay promiscuous configuration */
        if (rte_eth_promiscuous_get(port_id) == 1)
                rte_eth_promiscuous_enable(port_id);
        else if (rte_eth_promiscuous_get(port_id) == 0)
                rte_eth_promiscuous_disable(port_id);

        /* replay allmulticast configuration */
        if (rte_eth_allmulticast_get(port_id) == 1)
                rte_eth_allmulticast_enable(port_id);
        else if (rte_eth_allmulticast_get(port_id) == 0)
                rte_eth_allmulticast_disable(port_id);
}

int
rte_eth_dev_start(uint8_t port_id)
{
        struct rte_eth_dev *dev;
        int diag;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-EINVAL);
        }
        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
        diag = (*dev->dev_ops->dev_start)(dev);
        if (diag == 0)
                dev->data->dev_started = 1;
        else
                return diag;

        rte_eth_dev_config_restore(port_id);

        return 0;
}

void
rte_eth_dev_stop(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_RET();

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return;
        }
        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
        dev->data->dev_started = 0;
        (*dev->dev_ops->dev_stop)(dev);
}

void
rte_eth_dev_close(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_RET();

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return;
        }

        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
        dev->data->dev_started = 0;
        (*dev->dev_ops->dev_close)(dev);
}

int
rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
                       uint16_t nb_rx_desc, unsigned int socket_id,
                       const struct rte_eth_rxconf *rx_conf,
                       struct rte_mempool *mp)
{
        struct rte_eth_dev *dev;
        struct rte_pktmbuf_pool_private *mbp_priv;
        struct rte_eth_dev_info dev_info;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-EINVAL);
        }
        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
                return (-EINVAL);
        }

        if (dev->data->dev_started) {
                PMD_DEBUG_TRACE(
                    "port %d must be stopped to allow configuration\n", port_id);
                return -EBUSY;
        }

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);

        /*
         * Check the size of the mbuf data buffer.
         * This value must be provided in the private data of the memory pool.
         * First check that the memory pool has a valid private data area.
         */
        (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
        if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
                PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
                                mp->name, (int) mp->private_data_size,
                                (int) sizeof(struct rte_pktmbuf_pool_private));
                return (-ENOSPC);
        }
        mbp_priv = (struct rte_pktmbuf_pool_private *)
                ((char *)mp + sizeof(struct rte_mempool));
        if ((uint32_t) (mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM) <
            dev_info.min_rx_bufsize) {
                PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
                                "(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
                                "=%d)\n",
                                mp->name,
                                (int)mbp_priv->mbuf_data_room_size,
                                (int)(RTE_PKTMBUF_HEADROOM +
                                      dev_info.min_rx_bufsize),
                                (int)RTE_PKTMBUF_HEADROOM,
                                (int)dev_info.min_rx_bufsize);
                return (-EINVAL);
        }

        return (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
                                               socket_id, rx_conf, mp);
}

int
rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
                       uint16_t nb_tx_desc, unsigned int socket_id,
                       const struct rte_eth_txconf *tx_conf)
{
        struct rte_eth_dev *dev;

        /* This function is only safe when called from the primary process
         * in a multi-process setup */
        PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

        if (port_id >= RTE_MAX_ETHPORTS || port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-EINVAL);
        }
        dev = &rte_eth_devices[port_id];
        if (tx_queue_id >= dev->data->nb_tx_queues) {
                PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
                return (-EINVAL);
        }

        if (dev->data->dev_started) {
                PMD_DEBUG_TRACE(
                    "port %d must be stopped to allow configuration\n", port_id);
                return -EBUSY;
        }

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
        return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
                                               socket_id, tx_conf);
}
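
/*
 * Illustrative sketch (not part of the original file): full port bring-up
 * using the APIs above. Descriptor counts, the zeroed rxconf/txconf (real
 * applications tune the threshold fields) and the fallback socket are
 * assumptions. Guarded by the hypothetical RTE_ETHDEV_USAGE_EXAMPLES macro.
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static int
example_port_bringup(uint8_t port_id, struct rte_mempool *mb_pool)
{
        struct rte_eth_conf conf;
        struct rte_eth_rxconf rx_conf;
        struct rte_eth_txconf tx_conf;
        int socket = rte_eth_dev_socket_id(port_id);
        int ret;

        if (socket < 0)
                socket = 0; /* fall back to socket 0 on unknown NUMA node */

        memset(&conf, 0, sizeof(conf));
        memset(&rx_conf, 0, sizeof(rx_conf));
        memset(&tx_conf, 0, sizeof(tx_conf));

        ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
        if (ret != 0)
                return ret;
        ret = rte_eth_rx_queue_setup(port_id, 0, 128, (unsigned)socket,
                                     &rx_conf, mb_pool);
        if (ret != 0)
                return ret;
        ret = rte_eth_tx_queue_setup(port_id, 0, 512, (unsigned)socket,
                                     &tx_conf);
        if (ret != 0)
                return ret;
        return rte_eth_dev_start(port_id);
}
#endif /* RTE_ETHDEV_USAGE_EXAMPLES */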

void
rte_eth_promiscuous_enable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return;
        }
        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
        (*dev->dev_ops->promiscuous_enable)(dev);
        dev->data->promiscuous = 1;
}

void
rte_eth_promiscuous_disable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return;
        }
        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
        dev->data->promiscuous = 0;
        (*dev->dev_ops->promiscuous_disable)(dev);
}

int
rte_eth_promiscuous_get(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return -1;
        }

        dev = &rte_eth_devices[port_id];
        return dev->data->promiscuous;
}

void
rte_eth_allmulticast_enable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return;
        }
        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
        (*dev->dev_ops->allmulticast_enable)(dev);
        dev->data->all_multicast = 1;
}

void
rte_eth_allmulticast_disable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return;
        }
        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
        dev->data->all_multicast = 0;
        (*dev->dev_ops->allmulticast_disable)(dev);
}

int
rte_eth_allmulticast_get(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return -1;
        }

        dev = &rte_eth_devices[port_id];
        return dev->data->all_multicast;
}

static inline int
rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
                                struct rte_eth_link *link)
{
        struct rte_eth_link *dst = link;
        struct rte_eth_link *src = &(dev->data->dev_link);

        /*
         * Snapshot the 64-bit link word atomically, so a concurrent
         * writer (e.g. the LSC interrupt handler) cannot be observed
         * half-way through an update.
         */
        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                        *(uint64_t *)src) == 0)
                return -1;

        return 0;
}

void
rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return;
        }
        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_RET(*dev->dev_ops->link_update);

        if (dev->data->dev_conf.intr_conf.lsc != 0)
                rte_eth_dev_atomic_read_link_status(dev, eth_link);
        else {
                (*dev->dev_ops->link_update)(dev, 1);
                *eth_link = dev->data->dev_link;
        }
}

void
rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return;
        }
        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_RET(*dev->dev_ops->link_update);

        if (dev->data->dev_conf.intr_conf.lsc != 0)
                rte_eth_dev_atomic_read_link_status(dev, eth_link);
        else {
                (*dev->dev_ops->link_update)(dev, 0);
                *eth_link = dev->data->dev_link;
        }
}
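
/*
 * Illustrative sketch (not part of the original file): busy-wait until the
 * link reports up, using the no-wait variant above. The retry budget and
 * rte_delay_ms() (from rte_cycles.h) are assumptions for the example.
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static int
example_wait_for_link(uint8_t port_id)
{
        struct rte_eth_link link;
        int retries = 90; /* ~9 seconds with 100 ms polls; arbitrary */

        while (retries-- > 0) {
                rte_eth_link_get_nowait(port_id, &link);
                if (link.link_status)
                        return 0; /* link.link_speed now holds the rate */
                rte_delay_ms(100);
        }
        return -1; /* link still down */
}
#endif /* RTE_ETHDEV_USAGE_EXAMPLES */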

void
rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return;
        }
        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_RET(*dev->dev_ops->stats_get);
        (*dev->dev_ops->stats_get)(dev, stats);
        stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
}

void
rte_eth_stats_reset(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return;
        }
        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
        (*dev->dev_ops->stats_reset)(dev);
}

static int
set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
                uint8_t is_rx)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return -ENODEV;
        }
        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
        return (*dev->dev_ops->queue_stats_mapping_set)
                        (dev, queue_id, stat_idx, is_rx);
}

int
rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
                uint8_t stat_idx)
{
        return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
                        STAT_QMAP_TX);
}

int
rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
                uint8_t stat_idx)
{
        return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
                        STAT_QMAP_RX);
}
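
/*
 * Illustrative sketch (not part of the original file): give the first two
 * TX and RX queues their own hardware stats counters, so the per-queue
 * entries of struct rte_eth_stats reflect individual queues. Using the
 * queue id as the stat index is an arbitrary (but common) choice.
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static void
example_map_queue_stats(uint8_t port_id)
{
        uint8_t q;

        for (q = 0; q < 2; q++) {
                rte_eth_dev_set_tx_queue_stats_mapping(port_id, q, q);
                rte_eth_dev_set_rx_queue_stats_mapping(port_id, q, q);
        }
}
#endif /* RTE_ETHDEV_USAGE_EXAMPLES */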

void
rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return;
        }
        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
        (*dev->dev_ops->dev_infos_get)(dev, dev_info);
        dev_info->pci_dev = dev->pci_dev;
        dev_info->driver_name = dev->driver->pci_drv.name;
}

void
rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return;
        }
        dev = &rte_eth_devices[port_id];
        ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
}

int
rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-ENODEV);
        }
        dev = &rte_eth_devices[port_id];
        if (!(dev->data->dev_conf.rxmode.hw_vlan_filter)) {
                PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
                return (-ENOSYS);
        }

        if (vlan_id > 4095) {
                PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
                                port_id, (unsigned) vlan_id);
                return (-EINVAL);
        }
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
        (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
        return (0);
}

int
rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-ENODEV);
        }

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", rx_queue_id);
                return (-EINVAL);
        }

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
        (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);

        return (0);
}

int
rte_eth_dev_set_vlan_ether_type(uint8_t port_id, uint16_t tpid)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-ENODEV);
        }

        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
        (*dev->dev_ops->vlan_tpid_set)(dev, tpid);

        return (0);
}

int
rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
{
        struct rte_eth_dev *dev;
        int ret = 0;
        int mask = 0;
        int cur, org = 0;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-ENODEV);
        }

        dev = &rte_eth_devices[port_id];

        /* check which options were changed by the application */
        cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
        org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
        if (cur != org) {
                dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
                mask |= ETH_VLAN_STRIP_MASK;
        }

        cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
        org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
        if (cur != org) {
                dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
                mask |= ETH_VLAN_FILTER_MASK;
        }

        cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
        org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
        if (cur != org) {
                dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
                mask |= ETH_VLAN_EXTEND_MASK;
        }

        /* no change */
        if (mask == 0)
                return ret;

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
        (*dev->dev_ops->vlan_offload_set)(dev, mask);

        return ret;
}

int
rte_eth_dev_get_vlan_offload(uint8_t port_id)
{
        struct rte_eth_dev *dev;
        int ret = 0;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-ENODEV);
        }

        dev = &rte_eth_devices[port_id];

        if (dev->data->dev_conf.rxmode.hw_vlan_strip)
                ret |= ETH_VLAN_STRIP_OFFLOAD;

        if (dev->data->dev_conf.rxmode.hw_vlan_filter)
                ret |= ETH_VLAN_FILTER_OFFLOAD;

        if (dev->data->dev_conf.rxmode.hw_vlan_extend)
                ret |= ETH_VLAN_EXTEND_OFFLOAD;

        return ret;
}
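
/*
 * Illustrative sketch (not part of the original file): enable VLAN
 * stripping while preserving the other offload bits, using the get/set
 * pair above. Guarded by the hypothetical RTE_ETHDEV_USAGE_EXAMPLES macro.
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static int
example_enable_vlan_strip(uint8_t port_id)
{
        int mask = rte_eth_dev_get_vlan_offload(port_id);

        if (mask < 0)
                return mask; /* invalid port */
        mask |= ETH_VLAN_STRIP_OFFLOAD;
        /* only the changed bit is pushed down to the PMD */
        return rte_eth_dev_set_vlan_offload(port_id, mask);
}
#endif /* RTE_ETHDEV_USAGE_EXAMPLES */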

int
rte_eth_dev_fdir_add_signature_filter(uint8_t port_id,
                                      struct rte_fdir_filter *fdir_filter,
                                      uint8_t queue)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-ENODEV);
        }

        dev = &rte_eth_devices[port_id];

        if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
                PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
                                port_id, dev->data->dev_conf.fdir_conf.mode);
                return (-ENOSYS);
        }

        if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
             || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
            && (fdir_filter->port_src || fdir_filter->port_dst)) {
                PMD_DEBUG_TRACE(" Ports are meaningless for SCTP and "
                                "NONE l4type; source and destination ports "
                                "should be null!\n");
                return (-EINVAL);
        }

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_signature_filter, -ENOTSUP);
        return (*dev->dev_ops->fdir_add_signature_filter)(dev, fdir_filter,
                                                                queue);
}
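
/*
 * Illustrative sketch (not part of the original file): add a signature
 * filter steering flows with UDP destination port 5000 to RX queue 1.
 * Only fields referenced by the checks above are set; the
 * RTE_FDIR_IPTYPE_IPV4 and RTE_FDIR_L4TYPE_UDP enum values are assumptions.
 * The port must have been configured with
 * fdir_conf.mode = RTE_FDIR_MODE_SIGNATURE.
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static int
example_add_udp_fdir_filter(uint8_t port_id)
{
        struct rte_fdir_filter filter;

        memset(&filter, 0, sizeof(filter));
        filter.iptype = RTE_FDIR_IPTYPE_IPV4;   /* assumed enum value */
        filter.l4type = RTE_FDIR_L4TYPE_UDP;    /* assumed enum value */
        filter.port_dst = rte_cpu_to_be_16(5000);
        return rte_eth_dev_fdir_add_signature_filter(port_id, &filter,
                                                      1 /* queue */);
}
#endif /* RTE_ETHDEV_USAGE_EXAMPLES */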

int
rte_eth_dev_fdir_update_signature_filter(uint8_t port_id,
                                         struct rte_fdir_filter *fdir_filter,
                                         uint8_t queue)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-ENODEV);
        }

        dev = &rte_eth_devices[port_id];

        if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
                PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
                                port_id, dev->data->dev_conf.fdir_conf.mode);
                return (-ENOSYS);
        }

        if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
             || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
            && (fdir_filter->port_src || fdir_filter->port_dst)) {
                PMD_DEBUG_TRACE(" Ports are meaningless for SCTP and "
                                "NONE l4type; source and destination ports "
                                "should be null!\n");
                return (-EINVAL);
        }

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_signature_filter, -ENOTSUP);
        return (*dev->dev_ops->fdir_update_signature_filter)(dev, fdir_filter,
                                                                queue);
}

int
rte_eth_dev_fdir_remove_signature_filter(uint8_t port_id,
                                         struct rte_fdir_filter *fdir_filter)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-ENODEV);
        }

        dev = &rte_eth_devices[port_id];

        if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
                PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
                                port_id, dev->data->dev_conf.fdir_conf.mode);
                return (-ENOSYS);
        }

        if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
             || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
            && (fdir_filter->port_src || fdir_filter->port_dst)) {
                PMD_DEBUG_TRACE(" Ports are meaningless for SCTP and "
                                "NONE l4type; source and destination ports "
                                "should be null!\n");
                return (-EINVAL);
        }

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_signature_filter, -ENOTSUP);
        return (*dev->dev_ops->fdir_remove_signature_filter)(dev, fdir_filter);
}

int
rte_eth_dev_fdir_get_infos(uint8_t port_id, struct rte_eth_fdir *fdir)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-ENODEV);
        }

        dev = &rte_eth_devices[port_id];
        if (!(dev->data->dev_conf.fdir_conf.mode)) {
                PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
                return (-ENOSYS);
        }

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_infos_get, -ENOTSUP);

        (*dev->dev_ops->fdir_infos_get)(dev, fdir);
        return (0);
}

int
rte_eth_dev_fdir_add_perfect_filter(uint8_t port_id,
                                    struct rte_fdir_filter *fdir_filter,
                                    uint16_t soft_id, uint8_t queue,
                                    uint8_t drop)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-ENODEV);
        }

        dev = &rte_eth_devices[port_id];

        if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
                PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
                                port_id, dev->data->dev_conf.fdir_conf.mode);
                return (-ENOSYS);
        }

        if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
             || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
            && (fdir_filter->port_src || fdir_filter->port_dst)) {
                PMD_DEBUG_TRACE(" Ports are meaningless for SCTP and "
                                "NONE l4type; source and destination ports "
                                "should be null!\n");
                return (-EINVAL);
        }

        /* For now IPv6 is not supported with perfect filter */
        if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
                return (-ENOTSUP);

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_perfect_filter, -ENOTSUP);
        return (*dev->dev_ops->fdir_add_perfect_filter)(dev, fdir_filter,
                                                                soft_id, queue,
                                                                drop);
}

int
rte_eth_dev_fdir_update_perfect_filter(uint8_t port_id,
                                       struct rte_fdir_filter *fdir_filter,
                                       uint16_t soft_id, uint8_t queue,
                                       uint8_t drop)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-ENODEV);
        }

        dev = &rte_eth_devices[port_id];

        if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
                PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
                                port_id, dev->data->dev_conf.fdir_conf.mode);
                return (-ENOSYS);
        }

        if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
             || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
            && (fdir_filter->port_src || fdir_filter->port_dst)) {
                PMD_DEBUG_TRACE(" Ports are meaningless for SCTP and "
                                "NONE l4type; source and destination ports "
                                "should be null!\n");
                return (-EINVAL);
        }

        /* For now IPv6 is not supported with perfect filter */
        if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
                return (-ENOTSUP);

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_perfect_filter, -ENOTSUP);
        return (*dev->dev_ops->fdir_update_perfect_filter)(dev, fdir_filter,
                                                        soft_id, queue, drop);
}

int
rte_eth_dev_fdir_remove_perfect_filter(uint8_t port_id,
                                       struct rte_fdir_filter *fdir_filter,
                                       uint16_t soft_id)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-ENODEV);
        }

        dev = &rte_eth_devices[port_id];

        if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
                PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
                                port_id, dev->data->dev_conf.fdir_conf.mode);
                return (-ENOSYS);
        }

        if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
             || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
            && (fdir_filter->port_src || fdir_filter->port_dst)) {
                PMD_DEBUG_TRACE(" Ports are meaningless for SCTP and "
                                "NONE l4type; source and destination ports "
                                "should be null!\n");
                return (-EINVAL);
        }

        /* For now IPv6 is not supported with perfect filter */
        if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
                return (-ENOTSUP);

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_perfect_filter, -ENOTSUP);
        return (*dev->dev_ops->fdir_remove_perfect_filter)(dev, fdir_filter,
                                                                soft_id);
}
1432
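/*
 * The masks select which packet fields take part in flow director matching
 * for the whole port; a masked-out field becomes a wildcard for every
 * filter. They are therefore normally programmed before filters are added.
 */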
int
rte_eth_dev_fdir_set_masks(uint8_t port_id, struct rte_fdir_masks *fdir_mask)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-ENODEV);
        }

        dev = &rte_eth_devices[port_id];
        if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_NONE) {
                PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
                return (-ENOSYS);
        }

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_set_masks, -ENOTSUP);
        return (*dev->dev_ops->fdir_set_masks)(dev, fdir_mask);
}

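/*
 * Illustrative sketch (not part of this file; field values are placeholders
 * and valid watermarks/pause time are device specific): enable full
 * Ethernet flow control on port 0.
 *
 *      struct rte_eth_fc_conf fc_conf;
 *
 *      memset(&fc_conf, 0, sizeof(fc_conf));
 *      fc_conf.mode = RTE_FC_FULL;
 *      fc_conf.high_water = 0x80;
 *      fc_conf.low_water = 0x40;
 *      fc_conf.pause_time = 0x680;
 *      fc_conf.send_xon = 1;
 *      if (rte_eth_dev_flow_ctrl_set(0, &fc_conf) != 0)
 *              rte_exit(EXIT_FAILURE, "cannot set flow control\n");
 */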
int
rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-ENODEV);
        }

        if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
                PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
                return (-EINVAL);
        }

        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
        return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
}

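/*
 * Priority flow control (IEEE 802.1Qbb) pauses traffic per user priority
 * rather than per port; pfc_conf carries the priority (0-7) to which the
 * watermarks and pause time apply. PMD support is optional, hence the
 * explicit -ENOTSUP fallback below.
 */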
int
rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id,
                                   struct rte_eth_pfc_conf *pfc_conf)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-ENODEV);
        }

        if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
                PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
                return (-EINVAL);
        }

        dev = &rte_eth_devices[port_id];
        /* High and low water mark validation is device specific */
        if (*dev->dev_ops->priority_flow_ctrl_set)
                return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
        return (-ENOTSUP);
}

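/*
 * The RSS redirection table (RETA) has ETH_RSS_RETA_NUM_ENTRIES (128)
 * entries, each mapping a hash result to a receive queue. mask_lo and
 * mask_hi together form a 128-bit bitmap selecting which entries of
 * reta[] are applied; unselected entries keep their current value.
 *
 * Illustrative sketch (values are placeholders): spread the first four
 * RETA entries of port 0 across queues 0-3.
 *
 *      struct rte_eth_rss_reta reta_conf;
 *
 *      memset(&reta_conf, 0, sizeof(reta_conf));
 *      reta_conf.mask_lo = 0xf;
 *      reta_conf.reta[0] = 0;
 *      reta_conf.reta[1] = 1;
 *      reta_conf.reta[2] = 2;
 *      reta_conf.reta[3] = 3;
 *      rte_eth_dev_rss_reta_update(0, &reta_conf);
 */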
int
rte_eth_dev_rss_reta_update(uint8_t port_id, struct rte_eth_rss_reta *reta_conf)
{
        struct rte_eth_dev *dev;
        uint8_t i, j;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-ENODEV);
        }

        /* Invalid mask bit(s) setting */
        if ((reta_conf->mask_lo == 0) && (reta_conf->mask_hi == 0)) {
                PMD_DEBUG_TRACE("Invalid update mask bits for port=%d\n",
                                port_id);
                return (-EINVAL);
        }

        if (reta_conf->mask_lo != 0) {
                for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES/2; i++) {
                        if ((reta_conf->mask_lo & (1ULL << i)) &&
                                (reta_conf->reta[i] >= ETH_RSS_RETA_MAX_QUEUE)) {
                                PMD_DEBUG_TRACE("RETA hash index output "
                                        "configuration for port=%d, invalid "
                                        "queue=%d\n", port_id,
                                        reta_conf->reta[i]);

                                return (-EINVAL);
                        }
                }
        }

        if (reta_conf->mask_hi != 0) {
                for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES/2; i++) {
                        j = (uint8_t)(i + ETH_RSS_RETA_NUM_ENTRIES/2);

                        /* Reject queue numbers beyond the supported range */
                        if ((reta_conf->mask_hi & (1ULL << i)) &&
                                (reta_conf->reta[j] >= ETH_RSS_RETA_MAX_QUEUE)) {
                                PMD_DEBUG_TRACE("RETA hash index output "
                                        "configuration for port=%d, invalid "
                                        "queue=%d\n", port_id,
                                        reta_conf->reta[j]);

                                return (-EINVAL);
                        }
                }
        }

        dev = &rte_eth_devices[port_id];

        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
        return (*dev->dev_ops->reta_update)(dev, reta_conf);
}

int
rte_eth_dev_rss_reta_query(uint8_t port_id, struct rte_eth_rss_reta *reta_conf)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-ENODEV);
        }

        if ((reta_conf->mask_lo == 0) && (reta_conf->mask_hi == 0)) {
                PMD_DEBUG_TRACE("Invalid query mask bits for port=%d\n",
                                port_id);
                return (-EINVAL);
        }

        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
        return (*dev->dev_ops->reta_query)(dev, reta_conf);
}

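/*
 * LED control is typically used to blink a port's LED so the port can be
 * located physically; PMDs that cannot drive the LED return -ENOTSUP.
 */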
int
rte_eth_led_on(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-ENODEV);
        }

        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
        return ((*dev->dev_ops->dev_led_on)(dev));
}

int
rte_eth_led_off(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-ENODEV);
        }

        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
        return ((*dev->dev_ops->dev_led_off)(dev));
}

/*
 * Returns the index into the MAC address array of addr, or -1 if it is not
 * found. Pass 00:00:00:00:00:00 to find the first empty slot.
 */
static inline int
get_mac_addr_index(uint8_t port_id, struct ether_addr *addr)
{
        struct rte_eth_dev_info dev_info;
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        unsigned i;

        rte_eth_dev_info_get(port_id, &dev_info);

        for (i = 0; i < dev_info.max_mac_addrs; i++)
                if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
                        return i;

        return -1;
}

static struct ether_addr null_mac_addr = {{0, 0, 0, 0, 0, 0}};

int
rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
                uint32_t pool)
{
        struct rte_eth_dev *dev;
        int index;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-ENODEV);
        }
        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);

        if (is_zero_ether_addr(addr)) {
                PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n", port_id);
                return (-EINVAL);
        }

        /* Check if it's already there, and do nothing */
        index = get_mac_addr_index(port_id, addr);
        if (index >= 0)
                return 0;

        index = get_mac_addr_index(port_id, &null_mac_addr);
        if (index < 0) {
                PMD_DEBUG_TRACE("port %d: MAC address array full\n", port_id);
                return (-ENOSPC);
        }

        /* Update NIC */
        (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);

        /* Update address in NIC data structure */
        ether_addr_copy(addr, &dev->data->mac_addrs[index]);

        return 0;
}

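/*
 * Slot 0 of the MAC address array holds the default address assigned at
 * initialization; it cannot be removed through this function.
 */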
int
rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
{
        struct rte_eth_dev *dev;
        int index;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-ENODEV);
        }
        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);

        index = get_mac_addr_index(port_id, addr);
        if (index == 0) {
                PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
                return (-EADDRINUSE);
        } else if (index < 0)
                return 0;  /* Do nothing if address wasn't found */

        /* Update NIC */
        (*dev->dev_ops->mac_addr_remove)(dev, index);

        /* Update address in NIC data structure */
        ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);

        return 0;
}

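/*
 * When RTE_LIBRTE_ETHDEV_DEBUG is defined, the burst functions below are
 * compiled as real functions that validate the port and queue ids before
 * dispatching to the PMD; otherwise rte_ethdev.h provides inline versions
 * that skip these checks for speed.
 */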
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
uint16_t
rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
                 struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return 0;
        }
        dev = &rte_eth_devices[port_id];

        /* Return 0 (no packets), not a negative errno: the return type is
         * unsigned and callers interpret it as a packet count. */
        FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);
        if (queue_id >= dev->data->nb_rx_queues) {
                PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
                return 0;
        }
        return (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
                                                rx_pkts, nb_pkts);
}

uint16_t
rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
                 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return 0;
        }
        dev = &rte_eth_devices[port_id];

        /* Return 0 (no packets sent), not a negative errno: the return
         * type is unsigned and callers interpret it as a packet count. */
        FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);
        if (queue_id >= dev->data->nb_tx_queues) {
                PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
                return 0;
        }
        return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id],
                                                tx_pkts, nb_pkts);
}

uint32_t
rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return 0;
        }
        dev = &rte_eth_devices[port_id];
        /* Return 0 rather than -ENOTSUP: the return type is unsigned */
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, 0);
        return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
}

int
rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
{
        struct rte_eth_dev *dev;

        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-ENODEV);
        }
        dev = &rte_eth_devices[port_id];
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
        return (*dev->dev_ops->rx_descriptor_done)(
                dev->data->rx_queues[queue_id], offset);
}
#endif

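/*
 * Registering the same (event, cb_fn, cb_arg) triple twice is a no-op:
 * the existing entry is found and reused.
 *
 * Illustrative sketch (names are placeholders): log link state change
 * interrupts on port 0.
 *
 *      static void
 *      lsc_event_cb(uint8_t port_id, enum rte_eth_event_type type,
 *                   void *cb_arg)
 *      {
 *              RTE_LOG(INFO, PMD, "LSC event on port %u\n",
 *                      (unsigned) port_id);
 *      }
 *
 *      rte_eth_dev_callback_register(0, RTE_ETH_EVENT_INTR_LSC,
 *                                    lsc_event_cb, NULL);
 */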
int
rte_eth_dev_callback_register(uint8_t port_id,
                        enum rte_eth_event_type event,
                        rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_callback *user_cb;

        if (!cb_fn)
                return (-EINVAL);
        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-EINVAL);
        }

        dev = &rte_eth_devices[port_id];
        rte_spinlock_lock(&rte_eth_dev_cb_lock);

        TAILQ_FOREACH(user_cb, &(dev->callbacks), next) {
                if (user_cb->cb_fn == cb_fn &&
                        user_cb->cb_arg == cb_arg &&
                        user_cb->event == event) {
                        break;
                }
        }

        /* create a new callback. */
        if (user_cb == NULL && (user_cb = rte_zmalloc("INTR_USER_CALLBACK",
                        sizeof(struct rte_eth_dev_callback), 0)) != NULL) {
                user_cb->cb_fn = cb_fn;
                user_cb->cb_arg = cb_arg;
                user_cb->event = event;
                TAILQ_INSERT_TAIL(&(dev->callbacks), user_cb, next);
        }

        rte_spinlock_unlock(&rte_eth_dev_cb_lock);
        return ((user_cb == NULL) ? -ENOMEM : 0);
}

int
rte_eth_dev_callback_unregister(uint8_t port_id,
                        enum rte_eth_event_type event,
                        rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
        int ret;
        struct rte_eth_dev *dev;
        struct rte_eth_dev_callback *cb, *next;

        if (!cb_fn)
                return (-EINVAL);
        if (port_id >= nb_ports) {
                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
                return (-EINVAL);
        }

        dev = &rte_eth_devices[port_id];
        rte_spinlock_lock(&rte_eth_dev_cb_lock);

        ret = 0;
        for (cb = TAILQ_FIRST(&dev->callbacks); cb != NULL; cb = next) {

                next = TAILQ_NEXT(cb, next);

                if (cb->cb_fn != cb_fn || cb->event != event ||
                                (cb->cb_arg != (void *)-1 &&
                                cb->cb_arg != cb_arg))
                        continue;

                /*
                 * if this callback is not executing right now,
                 * then remove it.
                 */
                if (cb->active == 0) {
                        TAILQ_REMOVE(&(dev->callbacks), cb, next);
                        rte_free(cb);
                } else {
                        ret = -EAGAIN;
                }
        }

        rte_spinlock_unlock(&rte_eth_dev_cb_lock);
        return (ret);
}

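/*
 * Each matching callback is copied and marked active before the lock is
 * dropped, so the handler runs without holding rte_eth_dev_cb_lock; the
 * active flag is what lets rte_eth_dev_callback_unregister() return
 * -EAGAIN instead of freeing a callback that is still executing.
 */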
void
_rte_eth_dev_callback_process(struct rte_eth_dev *dev,
        enum rte_eth_event_type event)
{
        struct rte_eth_dev_callback *cb_lst;
        struct rte_eth_dev_callback dev_cb;

        rte_spinlock_lock(&rte_eth_dev_cb_lock);
        TAILQ_FOREACH(cb_lst, &(dev->callbacks), next) {
                if (cb_lst->cb_fn == NULL || cb_lst->event != event)
                        continue;
                dev_cb = *cb_lst;
                cb_lst->active = 1;
                rte_spinlock_unlock(&rte_eth_dev_cb_lock);
                dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
                                                dev_cb.cb_arg);
                rte_spinlock_lock(&rte_eth_dev_cb_lock);
                cb_lst->active = 0;
        }
        rte_spinlock_unlock(&rte_eth_dev_cb_lock);
}