ethdev: remove assumption that port will not be detached
[dpdk.git] / lib / librte_ether / rte_ethdev.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/types.h>
35 #include <sys/queue.h>
36 #include <ctype.h>
37 #include <stdio.h>
38 #include <stdlib.h>
39 #include <string.h>
40 #include <stdarg.h>
41 #include <errno.h>
42 #include <stdint.h>
43 #include <inttypes.h>
44 #include <netinet/in.h>
45
46 #include <rte_byteorder.h>
47 #include <rte_log.h>
48 #include <rte_debug.h>
49 #include <rte_interrupts.h>
50 #include <rte_pci.h>
51 #include <rte_memory.h>
52 #include <rte_memcpy.h>
53 #include <rte_memzone.h>
54 #include <rte_launch.h>
55 #include <rte_tailq.h>
56 #include <rte_eal.h>
57 #include <rte_per_lcore.h>
58 #include <rte_lcore.h>
59 #include <rte_atomic.h>
60 #include <rte_branch_prediction.h>
61 #include <rte_common.h>
62 #include <rte_ring.h>
63 #include <rte_mempool.h>
64 #include <rte_malloc.h>
65 #include <rte_mbuf.h>
66 #include <rte_errno.h>
67 #include <rte_spinlock.h>
68 #include <rte_string_fns.h>
69
70 #include "rte_ether.h"
71 #include "rte_ethdev.h"
72
73 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
/* Debug trace: expands to an ERR-level PMD log (prefixed with the calling
 * function's name) when RTE_LIBRTE_ETHDEV_DEBUG is set, otherwise to nothing.
 */
#define PMD_DEBUG_TRACE(fmt, args...) do {                        \
                RTE_LOG(ERR, PMD, "%s: " fmt, __func__, ## args); \
        } while (0)
#else
#define PMD_DEBUG_TRACE(fmt, args...)
#endif

/* Macros restricting control-path functions to the primary process only.
 * Secondary processes share the device data but must not reconfigure it.
 */
#define PROC_PRIMARY_OR_ERR_RET(retval) do { \
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
                PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
                return (retval); \
        } \
} while(0)
#define PROC_PRIMARY_OR_RET() do { \
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
                PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
                return; \
        } \
} while(0)

/* Macros to check for invalid (NULL) function pointers in the dev_ops
 * structure before dispatching to a PMD callback.
 */
#define FUNC_PTR_OR_ERR_RET(func, retval) do { \
        if ((func) == NULL) { \
                PMD_DEBUG_TRACE("Function not supported\n"); \
                return (retval); \
        } \
} while(0)
#define FUNC_PTR_OR_RET(func) do { \
        if ((func) == NULL) { \
                PMD_DEBUG_TRACE("Function not supported\n"); \
                return; \
        } \
} while(0)
108
/* Name of the memzone holding the shared per-port data array. */
static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
/* Per-process table of ethdev structures, indexed by port id. */
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
/* Pointer to the shared (memzone-backed) per-port data; lazily set up by
 * rte_eth_dev_data_alloc() on first port allocation. */
static struct rte_eth_dev_data *rte_eth_dev_data = NULL;
/* Number of currently attached ports (incremented/decremented on
 * attach/detach, not a high-water mark). */
static uint8_t nb_ports = 0;

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
116
/* store statistics names and its offset in stats structure  */
struct rte_eth_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

/* Device-level xstats: display name -> byte offset of the corresponding
 * counter inside struct rte_eth_stats. */
static struct rte_eth_xstats_name_off rte_stats_strings[] = {
         {"rx_packets", offsetof(struct rte_eth_stats, ipackets)},
         {"tx_packets", offsetof(struct rte_eth_stats, opackets)},
         {"rx_bytes", offsetof(struct rte_eth_stats, ibytes)},
         {"tx_bytes", offsetof(struct rte_eth_stats, obytes)},
         {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
         {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
         {"rx_crc_errors", offsetof(struct rte_eth_stats, ibadcrc)},
         {"rx_bad_length_errors", offsetof(struct rte_eth_stats, ibadlen)},
         {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
         {"alloc_rx_buff_failed", offsetof(struct rte_eth_stats, rx_nombuf)},
         {"fdir_match", offsetof(struct rte_eth_stats, fdirmatch)},
         {"fdir_miss", offsetof(struct rte_eth_stats, fdirmiss)},
         {"tx_flow_control_xon", offsetof(struct rte_eth_stats, tx_pause_xon)},
         {"rx_flow_control_xon", offsetof(struct rte_eth_stats, rx_pause_xon)},
         {"tx_flow_control_xoff", offsetof(struct rte_eth_stats, tx_pause_xoff)},
         {"rx_flow_control_xoff", offsetof(struct rte_eth_stats, rx_pause_xoff)},
};
#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))

/* Per-RX-queue xstats: offsets point at the q_ipackets/q_ibytes arrays. */
static struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
        {"rx_packets", offsetof(struct rte_eth_stats, q_ipackets)},
        {"rx_bytes", offsetof(struct rte_eth_stats, q_ibytes)},
};
#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
                sizeof(rte_rxq_stats_strings[0]))

/* Per-TX-queue xstats: offsets point at the q_opackets/q_obytes/q_errors
 * arrays. */
static struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
        {"tx_packets", offsetof(struct rte_eth_stats, q_opackets)},
        {"tx_bytes", offsetof(struct rte_eth_stats, q_obytes)},
        {"tx_errors", offsetof(struct rte_eth_stats, q_errors)},
};
#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
                sizeof(rte_txq_stats_strings[0]))
157
158
159 /**
160  * The user application callback description.
161  *
162  * It contains callback address to be registered by user application,
163  * the pointer to the parameters for callback, and the event type.
164  */
165 struct rte_eth_dev_callback {
166         TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
167         rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
168         void *cb_arg;                           /**< Parameter for callback */
169         enum rte_eth_event_type event;          /**< Interrupt event type */
170         uint32_t active;                        /**< Callback is executing */
171 };
172
173 enum {
174         STAT_QMAP_TX = 0,
175         STAT_QMAP_RX
176 };
177
178 enum {
179         DEV_DETACHED = 0,
180         DEV_ATTACHED
181 };
182
/*
 * Map the shared per-port data array into this process.
 *
 * The primary process reserves (and zeroes) the memzone; secondary
 * processes merely look it up so that all processes share the same
 * rte_eth_dev_data[] backing store. Panics on failure since no port
 * can function without this area.
 */
static inline void
rte_eth_dev_data_alloc(void)
{
        const unsigned flags = 0;
        const struct rte_memzone *mz;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY){
                mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
                                RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
                                rte_socket_id(), flags);
        } else
                mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
        if (mz == NULL)
                rte_panic("Cannot allocate memzone for ethernet port data\n");

        rte_eth_dev_data = mz->addr;
        /* Only the primary clears the array; secondaries must preserve the
         * state already published by the primary. */
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                memset(rte_eth_dev_data, 0,
                                RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
}
203
204 static struct rte_eth_dev *
205 rte_eth_dev_allocated(const char *name)
206 {
207         unsigned i;
208
209         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
210                 if ((rte_eth_devices[i].attached == DEV_ATTACHED) &&
211                     strcmp(rte_eth_devices[i].data->name, name) == 0)
212                         return &rte_eth_devices[i];
213         }
214         return NULL;
215 }
216
217 static uint8_t
218 rte_eth_dev_find_free_port(void)
219 {
220         unsigned i;
221
222         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
223                 if (rte_eth_devices[i].attached == DEV_DETACHED)
224                         return i;
225         }
226         return RTE_MAX_ETHPORTS;
227 }
228
229 struct rte_eth_dev *
230 rte_eth_dev_allocate(const char *name)
231 {
232         uint8_t port_id;
233         struct rte_eth_dev *eth_dev;
234
235         port_id = rte_eth_dev_find_free_port();
236         if (port_id == RTE_MAX_ETHPORTS) {
237                 PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
238                 return NULL;
239         }
240
241         if (rte_eth_dev_data == NULL)
242                 rte_eth_dev_data_alloc();
243
244         if (rte_eth_dev_allocated(name) != NULL) {
245                 PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n", name);
246                 return NULL;
247         }
248
249         eth_dev = &rte_eth_devices[port_id];
250         eth_dev->data = &rte_eth_dev_data[port_id];
251         snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
252         eth_dev->data->port_id = port_id;
253         eth_dev->attached = DEV_ATTACHED;
254         nb_ports++;
255         return eth_dev;
256 }
257
/*
 * PCI probe hook installed by rte_eth_driver_register().
 *
 * Allocates an ethdev slot named after the PCI address, allocates the
 * PMD-private area (primary process only), wires up the device, and
 * invokes the PMD's eth_dev_init callback. On PMD failure the slot is
 * released again (attached cleared, nb_ports decremented) so it can be
 * reused by a later probe.
 *
 * Returns 0 on success, -ENOMEM if no port slot is available, or the
 * PMD's negative error code.
 */
static int
rte_eth_dev_init(struct rte_pci_driver *pci_drv,
                 struct rte_pci_device *pci_dev)
{
        struct eth_driver    *eth_drv;
        struct rte_eth_dev *eth_dev;
        char ethdev_name[RTE_ETH_NAME_MAX_LEN];

        int diag;

        eth_drv = (struct eth_driver *)pci_drv;

        /* Create unique Ethernet device name using PCI address */
        snprintf(ethdev_name, RTE_ETH_NAME_MAX_LEN, "%d:%d.%d",
                        pci_dev->addr.bus, pci_dev->addr.devid, pci_dev->addr.function);

        eth_dev = rte_eth_dev_allocate(ethdev_name);
        if (eth_dev == NULL)
                return -ENOMEM;

        /* Only the primary allocates the private area; secondaries attach to
         * the already-populated shared data. */
        if (rte_eal_process_type() == RTE_PROC_PRIMARY){
                eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
                                  eth_drv->dev_private_size,
                                  RTE_CACHE_LINE_SIZE);
                if (eth_dev->data->dev_private == NULL)
                        rte_panic("Cannot allocate memzone for private port data\n");
        }
        eth_dev->pci_dev = pci_dev;
        eth_dev->driver = eth_drv;
        eth_dev->data->rx_mbuf_alloc_failed = 0;

        /* init user callbacks */
        TAILQ_INIT(&(eth_dev->link_intr_cbs));

        /*
         * Set the default MTU.
         */
        eth_dev->data->mtu = ETHER_MTU;

        /* Invoke PMD device initialization function */
        diag = (*eth_drv->eth_dev_init)(eth_drv, eth_dev);
        if (diag == 0)
                return (0);

        /* PMD init failed: log and roll back the slot allocation. */
        PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%u device_id=0x%x)"
                        " failed\n", pci_drv->name,
                        (unsigned) pci_dev->id.vendor_id,
                        (unsigned) pci_dev->id.device_id);
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                rte_free(eth_dev->data->dev_private);
        eth_dev->attached = DEV_DETACHED;
        nb_ports--;
        return diag;
}
312
313 /**
314  * Register an Ethernet [Poll Mode] driver.
315  *
316  * Function invoked by the initialization function of an Ethernet driver
317  * to simultaneously register itself as a PCI driver and as an Ethernet
318  * Poll Mode Driver.
319  * Invokes the rte_eal_pci_register() function to register the *pci_drv*
320  * structure embedded in the *eth_drv* structure, after having stored the
321  * address of the rte_eth_dev_init() function in the *devinit* field of
322  * the *pci_drv* structure.
323  * During the PCI probing phase, the rte_eth_dev_init() function is
324  * invoked for each PCI [Ethernet device] matching the embedded PCI
325  * identifiers provided by the driver.
326  */
327 void
328 rte_eth_driver_register(struct eth_driver *eth_drv)
329 {
330         eth_drv->pci_drv.devinit = rte_eth_dev_init;
331         rte_eal_pci_register(&eth_drv->pci_drv);
332 }
333
334 static int
335 rte_eth_dev_is_valid_port(uint8_t port_id)
336 {
337         if (port_id >= RTE_MAX_ETHPORTS ||
338             rte_eth_devices[port_id].attached != DEV_ATTACHED)
339                 return 0;
340         else
341                 return 1;
342 }
343
344 int
345 rte_eth_dev_socket_id(uint8_t port_id)
346 {
347         if (!rte_eth_dev_is_valid_port(port_id))
348                 return -1;
349         return rte_eth_devices[port_id].pci_dev->numa_node;
350 }
351
352 uint8_t
353 rte_eth_dev_count(void)
354 {
355         return (nb_ports);
356 }
357
358 static int
359 rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
360 {
361         uint16_t old_nb_queues = dev->data->nb_rx_queues;
362         void **rxq;
363         unsigned i;
364
365         if (dev->data->rx_queues == NULL) { /* first time configuration */
366                 dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
367                                 sizeof(dev->data->rx_queues[0]) * nb_queues,
368                                 RTE_CACHE_LINE_SIZE);
369                 if (dev->data->rx_queues == NULL) {
370                         dev->data->nb_rx_queues = 0;
371                         return -(ENOMEM);
372                 }
373 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
374                 dev->post_rx_burst_cbs = rte_zmalloc(
375                         "ethdev->post_rx_burst_cbs",
376                         sizeof(*dev->post_rx_burst_cbs) * nb_queues,
377                         RTE_CACHE_LINE_SIZE);
378                 if (dev->post_rx_burst_cbs == NULL) {
379                         rte_free(dev->data->rx_queues);
380                         dev->data->rx_queues = NULL;
381                         dev->data->nb_rx_queues = 0;
382                         return -ENOMEM;
383                 }
384 #endif
385
386         } else { /* re-configure */
387                 FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
388
389                 rxq = dev->data->rx_queues;
390
391                 for (i = nb_queues; i < old_nb_queues; i++)
392                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
393                 rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
394                                 RTE_CACHE_LINE_SIZE);
395                 if (rxq == NULL)
396                         return -(ENOMEM);
397 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
398                 dev->post_rx_burst_cbs = rte_realloc(
399                         dev->post_rx_burst_cbs,
400                         sizeof(*dev->post_rx_burst_cbs) *
401                                 nb_queues, RTE_CACHE_LINE_SIZE);
402                 if (dev->post_rx_burst_cbs == NULL)
403                         return -ENOMEM;
404 #endif
405                 if (nb_queues > old_nb_queues) {
406                         uint16_t new_qs = nb_queues - old_nb_queues;
407                         memset(rxq + old_nb_queues, 0,
408                                 sizeof(rxq[0]) * new_qs);
409 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
410                         memset(dev->post_rx_burst_cbs + old_nb_queues, 0,
411                                 sizeof(dev->post_rx_burst_cbs[0]) * new_qs);
412 #endif
413                 }
414
415                 dev->data->rx_queues = rxq;
416
417         }
418         dev->data->nb_rx_queues = nb_queues;
419         return (0);
420 }
421
422 int
423 rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
424 {
425         struct rte_eth_dev *dev;
426
427         /* This function is only safe when called from the primary process
428          * in a multi-process setup*/
429         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
430
431         if (!rte_eth_dev_is_valid_port(port_id)) {
432                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
433                 return -EINVAL;
434         }
435
436         dev = &rte_eth_devices[port_id];
437         if (rx_queue_id >= dev->data->nb_rx_queues) {
438                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
439                 return -EINVAL;
440         }
441
442         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
443
444         return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
445
446 }
447
448 int
449 rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
450 {
451         struct rte_eth_dev *dev;
452
453         /* This function is only safe when called from the primary process
454          * in a multi-process setup*/
455         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
456
457         if (!rte_eth_dev_is_valid_port(port_id)) {
458                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
459                 return -EINVAL;
460         }
461
462         dev = &rte_eth_devices[port_id];
463         if (rx_queue_id >= dev->data->nb_rx_queues) {
464                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
465                 return -EINVAL;
466         }
467
468         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
469
470         return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
471
472 }
473
474 int
475 rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
476 {
477         struct rte_eth_dev *dev;
478
479         /* This function is only safe when called from the primary process
480          * in a multi-process setup*/
481         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
482
483         if (!rte_eth_dev_is_valid_port(port_id)) {
484                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
485                 return -EINVAL;
486         }
487
488         dev = &rte_eth_devices[port_id];
489         if (tx_queue_id >= dev->data->nb_tx_queues) {
490                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
491                 return -EINVAL;
492         }
493
494         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
495
496         return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
497
498 }
499
500 int
501 rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
502 {
503         struct rte_eth_dev *dev;
504
505         /* This function is only safe when called from the primary process
506          * in a multi-process setup*/
507         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
508
509         if (!rte_eth_dev_is_valid_port(port_id)) {
510                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
511                 return -EINVAL;
512         }
513
514         dev = &rte_eth_devices[port_id];
515         if (tx_queue_id >= dev->data->nb_tx_queues) {
516                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
517                 return -EINVAL;
518         }
519
520         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
521
522         return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
523
524 }
525
526 static int
527 rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
528 {
529         uint16_t old_nb_queues = dev->data->nb_tx_queues;
530         void **txq;
531         unsigned i;
532
533         if (dev->data->tx_queues == NULL) { /* first time configuration */
534                 dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
535                                 sizeof(dev->data->tx_queues[0]) * nb_queues,
536                                 RTE_CACHE_LINE_SIZE);
537                 if (dev->data->tx_queues == NULL) {
538                         dev->data->nb_tx_queues = 0;
539                         return -(ENOMEM);
540                 }
541 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
542                 dev->pre_tx_burst_cbs = rte_zmalloc(
543                         "ethdev->pre_tx_burst_cbs",
544                         sizeof(*dev->pre_tx_burst_cbs) * nb_queues,
545                         RTE_CACHE_LINE_SIZE);
546                 if (dev->pre_tx_burst_cbs == NULL) {
547                         rte_free(dev->data->tx_queues);
548                         dev->data->tx_queues = NULL;
549                         dev->data->nb_tx_queues = 0;
550                         return -ENOMEM;
551                 }
552 #endif
553
554         } else { /* re-configure */
555                 FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
556
557                 txq = dev->data->tx_queues;
558
559                 for (i = nb_queues; i < old_nb_queues; i++)
560                         (*dev->dev_ops->tx_queue_release)(txq[i]);
561                 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
562                                 RTE_CACHE_LINE_SIZE);
563                 if (txq == NULL)
564                         return -ENOMEM;
565 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
566                 dev->pre_tx_burst_cbs = rte_realloc(
567                         dev->pre_tx_burst_cbs,
568                         sizeof(*dev->pre_tx_burst_cbs) *
569                                 nb_queues, RTE_CACHE_LINE_SIZE);
570                 if (dev->pre_tx_burst_cbs == NULL)
571                         return -ENOMEM;
572 #endif
573                 if (nb_queues > old_nb_queues) {
574                         uint16_t new_qs = nb_queues - old_nb_queues;
575                         memset(txq + old_nb_queues, 0,
576                                 sizeof(txq[0]) * new_qs);
577 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
578                         memset(dev->pre_tx_burst_cbs + old_nb_queues, 0,
579                                 sizeof(dev->pre_tx_burst_cbs[0]) * new_qs);
580 #endif
581                 }
582
583                 dev->data->tx_queues = txq;
584
585         }
586         dev->data->nb_tx_queues = nb_queues;
587         return (0);
588 }
589
590 static int
591 rte_eth_dev_check_vf_rss_rxq_num(uint8_t port_id, uint16_t nb_rx_q)
592 {
593         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
594         switch (nb_rx_q) {
595         case 1:
596         case 2:
597                 RTE_ETH_DEV_SRIOV(dev).active =
598                         ETH_64_POOLS;
599                 break;
600         case 4:
601                 RTE_ETH_DEV_SRIOV(dev).active =
602                         ETH_32_POOLS;
603                 break;
604         default:
605                 return -EINVAL;
606         }
607
608         RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q;
609         RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
610                 dev->pci_dev->max_vfs * nb_rx_q;
611
612         return 0;
613 }
614
/*
 * Validate the requested multi-queue configuration against the device's
 * SRIOV state and queue limits, adjusting dev->data->dev_conf where the
 * mode must be coerced (e.g. plain RSS -> VMDQ RSS under SRIOV).
 *
 * Returns 0 when the configuration is acceptable, -EINVAL otherwise.
 * Note: this mutates dev->data->dev_conf as a side effect in the SRIOV
 * branches.
 */
static int
rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                      const struct rte_eth_conf *dev_conf)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];

        if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
                /* check multi-queue mode */
                if ((dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) ||
                    (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB_RSS) ||
                    (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB)) {
                        /* SRIOV only works in VMDq enable mode */
                        PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
                                        " SRIOV active, "
                                        "wrong VMDQ mq_mode rx %u tx %u\n",
                                        port_id,
                                        dev_conf->rxmode.mq_mode,
                                        dev_conf->txmode.mq_mode);
                        return (-EINVAL);
                }

                switch (dev_conf->rxmode.mq_mode) {
                case ETH_MQ_RX_VMDQ_DCB:
                case ETH_MQ_RX_VMDQ_DCB_RSS:
                        /* DCB/RSS VMDQ in SRIOV mode, not implement yet */
                        PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
                                        " SRIOV active, "
                                        "unsupported VMDQ mq_mode rx %u\n",
                                        port_id, dev_conf->rxmode.mq_mode);
                        return (-EINVAL);
                case ETH_MQ_RX_RSS:
                        PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
                                        " SRIOV active, "
                                        "Rx mq mode is changed from:"
                                        "mq_mode %u into VMDQ mq_mode %u\n",
                                        port_id,
                                        dev_conf->rxmode.mq_mode,
                                        dev->data->dev_conf.rxmode.mq_mode);
                        /* fallthrough: plain RSS is coerced to VMDQ RSS */
                case ETH_MQ_RX_VMDQ_RSS:
                        dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
                        if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
                                if (rte_eth_dev_check_vf_rss_rxq_num(port_id, nb_rx_q) != 0) {
                                        PMD_DEBUG_TRACE("ethdev port_id=%d"
                                                " SRIOV active, invalid queue"
                                                " number for VMDQ RSS, allowed"
                                                " value are 1, 2 or 4\n",
                                                port_id);
                                        return -EINVAL;
                                }
                        break;
                default: /* ETH_MQ_RX_VMDQ_ONLY or ETH_MQ_RX_NONE */
                        /* if nothing mq mode configure, use default scheme */
                        dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
                        if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
                                RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
                        break;
                }

                switch (dev_conf->txmode.mq_mode) {
                case ETH_MQ_TX_VMDQ_DCB:
                        /* DCB VMDQ in SRIOV mode, not implement yet */
                        PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
                                        " SRIOV active, "
                                        "unsupported VMDQ mq_mode tx %u\n",
                                        port_id, dev_conf->txmode.mq_mode);
                        return (-EINVAL);
                default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
                        /* if nothing mq mode configure, use default scheme */
                        dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
                        break;
                }

                /* check valid queue number */
                if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
                    (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
                        PMD_DEBUG_TRACE("ethdev port_id=%d SRIOV active, "
                                    "queue number must less equal to %d\n",
                                        port_id, RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
                        return (-EINVAL);
                }
        } else {
                /* For vmdb+dcb mode check our configuration before we go further */
                if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
                        const struct rte_eth_vmdq_dcb_conf *conf;

                        if (nb_rx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_rx_q "
                                                "!= %d\n",
                                                port_id, ETH_VMDQ_DCB_NUM_QUEUES);
                                return (-EINVAL);
                        }
                        conf = &(dev_conf->rx_adv_conf.vmdq_dcb_conf);
                        if (! (conf->nb_queue_pools == ETH_16_POOLS ||
                               conf->nb_queue_pools == ETH_32_POOLS)) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
                                                "nb_queue_pools must be %d or %d\n",
                                                port_id, ETH_16_POOLS, ETH_32_POOLS);
                                return (-EINVAL);
                        }
                }
                if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
                        const struct rte_eth_vmdq_dcb_tx_conf *conf;

                        if (nb_tx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_tx_q "
                                                "!= %d\n",
                                                port_id, ETH_VMDQ_DCB_NUM_QUEUES);
                                return (-EINVAL);
                        }
                        conf = &(dev_conf->tx_adv_conf.vmdq_dcb_tx_conf);
                        if (! (conf->nb_queue_pools == ETH_16_POOLS ||
                               conf->nb_queue_pools == ETH_32_POOLS)) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
                                                "nb_queue_pools != %d or nb_queue_pools "
                                                "!= %d\n",
                                                port_id, ETH_16_POOLS, ETH_32_POOLS);
                                return (-EINVAL);
                        }
                }

                /* For DCB mode check our configuration before we go further */
                if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
                        const struct rte_eth_dcb_rx_conf *conf;

                        if (nb_rx_q != ETH_DCB_NUM_QUEUES) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_rx_q "
                                                "!= %d\n",
                                                port_id, ETH_DCB_NUM_QUEUES);
                                return (-EINVAL);
                        }
                        conf = &(dev_conf->rx_adv_conf.dcb_rx_conf);
                        if (! (conf->nb_tcs == ETH_4_TCS ||
                               conf->nb_tcs == ETH_8_TCS)) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
                                                "nb_tcs != %d or nb_tcs "
                                                "!= %d\n",
                                                port_id, ETH_4_TCS, ETH_8_TCS);
                                return (-EINVAL);
                        }
                }

                if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
                        const struct rte_eth_dcb_tx_conf *conf;

                        if (nb_tx_q != ETH_DCB_NUM_QUEUES) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_tx_q "
                                                "!= %d\n",
                                                port_id, ETH_DCB_NUM_QUEUES);
                                return (-EINVAL);
                        }
                        conf = &(dev_conf->tx_adv_conf.dcb_tx_conf);
                        if (! (conf->nb_tcs == ETH_4_TCS ||
                               conf->nb_tcs == ETH_8_TCS)) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
                                                "nb_tcs != %d or nb_tcs "
                                                "!= %d\n",
                                                port_id, ETH_4_TCS, ETH_8_TCS);
                                return (-EINVAL);
                        }
                }
        }
        return 0;
}
778
/*
 * Configure an Ethernet device: validate the requested RX/TX queue
 * counts and configuration against the device capabilities, store the
 * configuration, size the queue arrays and invoke the PMD's
 * dev_configure callback.  Returns 0 on success, negative errno
 * value on error.
 */
int
rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	int diag;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup*/
	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

	if (!rte_eth_dev_is_valid_port(port_id)) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-EINVAL);
	}

	dev = &rte_eth_devices[port_id];

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	/* Reconfiguring a running port is not allowed. */
	if (dev->data->dev_started) {
		PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n", port_id);
		return (-EBUSY);
	}

	/*
	 * Check that the numbers of RX and TX queues are not greater
	 * than the maximum number of RX and TX queues supported by the
	 * configured device.
	 */
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);
	if (nb_rx_q > dev_info.max_rx_queues) {
		PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
				port_id, nb_rx_q, dev_info.max_rx_queues);
		return (-EINVAL);
	}
	if (nb_rx_q == 0) {
		PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_q == 0\n", port_id);
		return (-EINVAL);
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
				port_id, nb_tx_q, dev_info.max_tx_queues);
		return (-EINVAL);
	}
	if (nb_tx_q == 0) {
		PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_q == 0\n", port_id);
		return (-EINVAL);
	}

	/* Copy the dev_conf parameter into the dev structure */
	/* NOTE(review): the copy happens before the lsc/jumbo/mq-mode
	 * checks below, so a failed configure leaves a partially-applied
	 * dev_conf in dev->data -- confirm callers re-configure on error. */
	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

	/*
	 * If link state interrupt is enabled, check that the
	 * device supports it.
	 */
	if (dev_conf->intr_conf.lsc == 1) {
		const struct rte_pci_driver *pci_drv = &dev->driver->pci_drv;

		if (!(pci_drv->drv_flags & RTE_PCI_DRV_INTR_LSC)) {
			PMD_DEBUG_TRACE("driver %s does not support lsc\n",
					pci_drv->name);
			return (-EINVAL);
		}
	}

	/*
	 * If jumbo frames are enabled, check that the maximum RX packet
	 * length is supported by the configured device.
	 */
	if (dev_conf->rxmode.jumbo_frame == 1) {
		if (dev_conf->rxmode.max_rx_pkt_len >
		    dev_info.max_rx_pktlen) {
			PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
				" > max valid value %u\n",
				port_id,
				(unsigned)dev_conf->rxmode.max_rx_pkt_len,
				(unsigned)dev_info.max_rx_pktlen);
			return (-EINVAL);
		}
		else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
			PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
				" < min valid value %u\n",
				port_id,
				(unsigned)dev_conf->rxmode.max_rx_pkt_len,
				(unsigned)ETHER_MIN_LEN);
			return (-EINVAL);
		}
	} else {
		/* without jumbo frames, silently clamp an out-of-range
		 * length to the standard Ethernet maximum */
		if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
			dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
			/* Use default value */
			dev->data->dev_conf.rxmode.max_rx_pkt_len =
							ETHER_MAX_LEN;
	}

	/* multiple queue mode checking */
	diag = rte_eth_dev_check_mq_mode(port_id, nb_rx_q, nb_tx_q, dev_conf);
	if (diag != 0) {
		PMD_DEBUG_TRACE("port%d rte_eth_dev_check_mq_mode = %d\n",
				port_id, diag);
		return diag;
	}

	/*
	 * Setup new number of RX/TX queues and reconfigure device.
	 */
	diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
				port_id, diag);
		return diag;
	}

	diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
				port_id, diag);
		/* roll back the RX queue allocation on failure */
		rte_eth_dev_rx_queue_config(dev, 0);
		return diag;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
				port_id, diag);
		/* roll back both queue allocations on failure */
		rte_eth_dev_rx_queue_config(dev, 0);
		rte_eth_dev_tx_queue_config(dev, 0);
		return diag;
	}

	return 0;
}
917
/*
 * Re-apply the configuration cached in dev->data to the hardware after
 * a (re)start: unicast MAC addresses, promiscuous mode and
 * all-multicast mode.
 */
static void
rte_eth_dev_config_restore(uint8_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct ether_addr addr;
	uint16_t i;
	uint32_t pool = 0;

	dev = &rte_eth_devices[port_id];

	rte_eth_dev_info_get(port_id, &dev_info);

	/* with SR-IOV active, replay addresses into the default VMDq
	 * pool instead of pool 0 */
	if (RTE_ETH_DEV_SRIOV(dev).active)
		pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx;

	/* replay MAC address configuration */
	for (i = 0; i < dev_info.max_mac_addrs; i++) {
		addr = dev->data->mac_addrs[i];

		/* skip zero address */
		if (is_zero_ether_addr(&addr))
			continue;

		/* add address to the hardware, but only if the address was
		 * assigned to this pool (mac_pool_sel is a per-address
		 * bitmask of pools) */
		if  (*dev->dev_ops->mac_addr_add &&
			(dev->data->mac_pool_sel[i] & (1ULL << pool)))
			(*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool);
		else {
			PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
					port_id);
			/* exit the loop but not return an error */
			break;
		}
	}

	/* replay promiscuous configuration */
	if (rte_eth_promiscuous_get(port_id) == 1)
		rte_eth_promiscuous_enable(port_id);
	else if (rte_eth_promiscuous_get(port_id) == 0)
		rte_eth_promiscuous_disable(port_id);

	/* replay allmulticast configuration */
	if (rte_eth_allmulticast_get(port_id) == 1)
		rte_eth_allmulticast_enable(port_id);
	else if (rte_eth_allmulticast_get(port_id) == 0)
		rte_eth_allmulticast_disable(port_id);
}
966
967 int
968 rte_eth_dev_start(uint8_t port_id)
969 {
970         struct rte_eth_dev *dev;
971         int diag;
972
973         /* This function is only safe when called from the primary process
974          * in a multi-process setup*/
975         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
976
977         if (!rte_eth_dev_is_valid_port(port_id)) {
978                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
979                 return (-EINVAL);
980         }
981
982         dev = &rte_eth_devices[port_id];
983
984         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
985
986         if (dev->data->dev_started != 0) {
987                 PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
988                         " already started\n",
989                         port_id);
990                 return (0);
991         }
992
993         diag = (*dev->dev_ops->dev_start)(dev);
994         if (diag == 0)
995                 dev->data->dev_started = 1;
996         else
997                 return diag;
998
999         rte_eth_dev_config_restore(port_id);
1000
1001         if (dev->data->dev_conf.intr_conf.lsc != 0) {
1002                 FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
1003                 (*dev->dev_ops->link_update)(dev, 0);
1004         }
1005         return 0;
1006 }
1007
1008 void
1009 rte_eth_dev_stop(uint8_t port_id)
1010 {
1011         struct rte_eth_dev *dev;
1012
1013         /* This function is only safe when called from the primary process
1014          * in a multi-process setup*/
1015         PROC_PRIMARY_OR_RET();
1016
1017         if (!rte_eth_dev_is_valid_port(port_id)) {
1018                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1019                 return;
1020         }
1021
1022         dev = &rte_eth_devices[port_id];
1023
1024         FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1025
1026         if (dev->data->dev_started == 0) {
1027                 PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
1028                         " already stopped\n",
1029                         port_id);
1030                 return;
1031         }
1032
1033         dev->data->dev_started = 0;
1034         (*dev->dev_ops->dev_stop)(dev);
1035 }
1036
1037 int
1038 rte_eth_dev_set_link_up(uint8_t port_id)
1039 {
1040         struct rte_eth_dev *dev;
1041
1042         /* This function is only safe when called from the primary process
1043          * in a multi-process setup*/
1044         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
1045
1046         if (!rte_eth_dev_is_valid_port(port_id)) {
1047                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1048                 return -EINVAL;
1049         }
1050
1051         dev = &rte_eth_devices[port_id];
1052
1053         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1054         return (*dev->dev_ops->dev_set_link_up)(dev);
1055 }
1056
1057 int
1058 rte_eth_dev_set_link_down(uint8_t port_id)
1059 {
1060         struct rte_eth_dev *dev;
1061
1062         /* This function is only safe when called from the primary process
1063          * in a multi-process setup*/
1064         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
1065
1066         if (!rte_eth_dev_is_valid_port(port_id)) {
1067                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1068                 return -EINVAL;
1069         }
1070
1071         dev = &rte_eth_devices[port_id];
1072
1073         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1074         return (*dev->dev_ops->dev_set_link_down)(dev);
1075 }
1076
1077 void
1078 rte_eth_dev_close(uint8_t port_id)
1079 {
1080         struct rte_eth_dev *dev;
1081
1082         /* This function is only safe when called from the primary process
1083          * in a multi-process setup*/
1084         PROC_PRIMARY_OR_RET();
1085
1086         if (!rte_eth_dev_is_valid_port(port_id)) {
1087                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1088                 return;
1089         }
1090
1091         dev = &rte_eth_devices[port_id];
1092
1093         FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
1094         dev->data->dev_started = 0;
1095         (*dev->dev_ops->dev_close)(dev);
1096 }
1097
/*
 * Configure one RX queue of a port.  The port must be configured and
 * stopped; the supplied mempool must carry rte_pktmbuf private data
 * and provide buffers large enough for the device's minimum RX size.
 * Returns 0 on success or a negative errno value.
 */
int
rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
		       uint16_t nb_rx_desc, unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf,
		       struct rte_mempool *mp)
{
	int ret;
	uint32_t mbp_buf_size;
	struct rte_eth_dev *dev;
	struct rte_pktmbuf_pool_private *mbp_priv;
	struct rte_eth_dev_info dev_info;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup*/
	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

	if (!rte_eth_dev_is_valid_port(port_id)) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-EINVAL);
	}

	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
		return (-EINVAL);
	}

	/* queues may only be set up while the port is stopped */
	if (dev->data->dev_started) {
		PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n", port_id);
		return -EBUSY;
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);

	/*
	 * Check the size of the mbuf data buffer.
	 * This value must be provided in the private data of the memory pool.
	 * First check that the memory pool has a valid private data.
	 */
	rte_eth_dev_info_get(port_id, &dev_info);
	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
		PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
				mp->name, (int) mp->private_data_size,
				(int) sizeof(struct rte_pktmbuf_pool_private));
		return (-ENOSPC);
	}
	mbp_priv = rte_mempool_get_priv(mp);
	mbp_buf_size = mbp_priv->mbuf_data_room_size;

	/* the usable buffer (after headroom) must satisfy the device's
	 * minimum RX buffer size */
	if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
		PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
				"(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
				"=%d)\n",
				mp->name,
				(int)mbp_buf_size,
				(int)(RTE_PKTMBUF_HEADROOM +
				      dev_info.min_rx_bufsize),
				(int)RTE_PKTMBUF_HEADROOM,
				(int)dev_info.min_rx_bufsize);
		return (-EINVAL);
	}

	/* fall back to the driver-reported default RX configuration */
	if (rx_conf == NULL)
		rx_conf = &dev_info.default_rxconf;

	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
					      socket_id, rx_conf, mp);
	if (!ret) {
		/* track the smallest RX buffer size across all queues */
		if (!dev->data->min_rx_buf_size ||
		    dev->data->min_rx_buf_size > mbp_buf_size)
			dev->data->min_rx_buf_size = mbp_buf_size;
	}

	return ret;
}
1175
1176 int
1177 rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
1178                        uint16_t nb_tx_desc, unsigned int socket_id,
1179                        const struct rte_eth_txconf *tx_conf)
1180 {
1181         struct rte_eth_dev *dev;
1182         struct rte_eth_dev_info dev_info;
1183
1184         /* This function is only safe when called from the primary process
1185          * in a multi-process setup*/
1186         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
1187
1188         if (!rte_eth_dev_is_valid_port(port_id)) {
1189                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1190                 return (-EINVAL);
1191         }
1192
1193         dev = &rte_eth_devices[port_id];
1194         if (tx_queue_id >= dev->data->nb_tx_queues) {
1195                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
1196                 return (-EINVAL);
1197         }
1198
1199         if (dev->data->dev_started) {
1200                 PMD_DEBUG_TRACE(
1201                     "port %d must be stopped to allow configuration\n", port_id);
1202                 return -EBUSY;
1203         }
1204
1205         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1206         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
1207
1208         rte_eth_dev_info_get(port_id, &dev_info);
1209
1210         if (tx_conf == NULL)
1211                 tx_conf = &dev_info.default_txconf;
1212
1213         return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
1214                                                socket_id, tx_conf);
1215 }
1216
1217 void
1218 rte_eth_promiscuous_enable(uint8_t port_id)
1219 {
1220         struct rte_eth_dev *dev;
1221
1222         if (!rte_eth_dev_is_valid_port(port_id)) {
1223                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1224                 return;
1225         }
1226
1227         dev = &rte_eth_devices[port_id];
1228
1229         FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
1230         (*dev->dev_ops->promiscuous_enable)(dev);
1231         dev->data->promiscuous = 1;
1232 }
1233
1234 void
1235 rte_eth_promiscuous_disable(uint8_t port_id)
1236 {
1237         struct rte_eth_dev *dev;
1238
1239         if (!rte_eth_dev_is_valid_port(port_id)) {
1240                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1241                 return;
1242         }
1243
1244         dev = &rte_eth_devices[port_id];
1245
1246         FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
1247         dev->data->promiscuous = 0;
1248         (*dev->dev_ops->promiscuous_disable)(dev);
1249 }
1250
1251 int
1252 rte_eth_promiscuous_get(uint8_t port_id)
1253 {
1254         struct rte_eth_dev *dev;
1255
1256         if (!rte_eth_dev_is_valid_port(port_id)) {
1257                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1258                 return -1;
1259         }
1260
1261         dev = &rte_eth_devices[port_id];
1262         return dev->data->promiscuous;
1263 }
1264
1265 void
1266 rte_eth_allmulticast_enable(uint8_t port_id)
1267 {
1268         struct rte_eth_dev *dev;
1269
1270         if (!rte_eth_dev_is_valid_port(port_id)) {
1271                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1272                 return;
1273         }
1274
1275         dev = &rte_eth_devices[port_id];
1276
1277         FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
1278         (*dev->dev_ops->allmulticast_enable)(dev);
1279         dev->data->all_multicast = 1;
1280 }
1281
1282 void
1283 rte_eth_allmulticast_disable(uint8_t port_id)
1284 {
1285         struct rte_eth_dev *dev;
1286
1287         if (!rte_eth_dev_is_valid_port(port_id)) {
1288                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1289                 return;
1290         }
1291
1292         dev = &rte_eth_devices[port_id];
1293
1294         FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
1295         dev->data->all_multicast = 0;
1296         (*dev->dev_ops->allmulticast_disable)(dev);
1297 }
1298
1299 int
1300 rte_eth_allmulticast_get(uint8_t port_id)
1301 {
1302         struct rte_eth_dev *dev;
1303
1304         if (!rte_eth_dev_is_valid_port(port_id)) {
1305                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1306                 return -1;
1307         }
1308
1309         dev = &rte_eth_devices[port_id];
1310         return dev->data->all_multicast;
1311 }
1312
/*
 * Copy the device's link status into *link as one atomic 64-bit
 * operation (struct rte_eth_link is accessed through uint64_t casts,
 * so it is assumed to occupy exactly 64 bits -- TODO confirm against
 * the struct definition).
 * Returns 0 on success, -1 if the status changed concurrently and the
 * compare-and-set failed.
 */
static inline int
rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	/* cmpset(dst, expected=*dst, new=*src): writes the device link
	 * word into *dst; returns 0 (failure) if *dst was modified
	 * between reading the expected value and the swap */
	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
1326
1327 void
1328 rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
1329 {
1330         struct rte_eth_dev *dev;
1331
1332         if (!rte_eth_dev_is_valid_port(port_id)) {
1333                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1334                 return;
1335         }
1336
1337         dev = &rte_eth_devices[port_id];
1338
1339         if (dev->data->dev_conf.intr_conf.lsc != 0)
1340                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1341         else {
1342                 FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1343                 (*dev->dev_ops->link_update)(dev, 1);
1344                 *eth_link = dev->data->dev_link;
1345         }
1346 }
1347
1348 void
1349 rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
1350 {
1351         struct rte_eth_dev *dev;
1352
1353         if (!rte_eth_dev_is_valid_port(port_id)) {
1354                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1355                 return;
1356         }
1357
1358         dev = &rte_eth_devices[port_id];
1359
1360         if (dev->data->dev_conf.intr_conf.lsc != 0)
1361                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1362         else {
1363                 FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1364                 (*dev->dev_ops->link_update)(dev, 0);
1365                 *eth_link = dev->data->dev_link;
1366         }
1367 }
1368
1369 int
1370 rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
1371 {
1372         struct rte_eth_dev *dev;
1373
1374         if (!rte_eth_dev_is_valid_port(port_id)) {
1375                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1376                 return (-ENODEV);
1377         }
1378
1379         dev = &rte_eth_devices[port_id];
1380         memset(stats, 0, sizeof(*stats));
1381
1382         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1383         (*dev->dev_ops->stats_get)(dev, stats);
1384         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1385         return 0;
1386 }
1387
1388 void
1389 rte_eth_stats_reset(uint8_t port_id)
1390 {
1391         struct rte_eth_dev *dev;
1392
1393         if (!rte_eth_dev_is_valid_port(port_id)) {
1394                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1395                 return;
1396         }
1397
1398         dev = &rte_eth_devices[port_id];
1399
1400         FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
1401         (*dev->dev_ops->stats_reset)(dev);
1402 }
1403
/* retrieve ethdev extended statistics */
/*
 * Fill xstats[] with up to n extended statistics.  If the PMD
 * implements xstats_get it is used directly; otherwise the generic
 * rte_eth_stats counters (global plus per-queue) are exported.  When
 * n is smaller than the number available, the required count is
 * returned without filling the array.
 */
int
rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstats *xstats,
	unsigned n)
{
	struct rte_eth_stats eth_stats;
	struct rte_eth_dev *dev;
	unsigned count, i, q;
	uint64_t val;
	char *stats_ptr;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return -1;
	}

	dev = &rte_eth_devices[port_id];

	/* implemented by the driver */
	if (dev->dev_ops->xstats_get != NULL)
		return (*dev->dev_ops->xstats_get)(dev, xstats, n);

	/* else, return generic statistics */
	count = RTE_NB_STATS;
	count += dev->data->nb_rx_queues * RTE_NB_RXQ_STATS;
	count += dev->data->nb_tx_queues * RTE_NB_TXQ_STATS;
	if (n < count)
		return count;

	/* now fill the xstats structure */

	count = 0;
	memset(&eth_stats, 0, sizeof(eth_stats));
	rte_eth_stats_get(port_id, &eth_stats);

	/* global stats */
	for (i = 0; i < RTE_NB_STATS; i++) {
		/* read the counter at its byte offset within eth_stats */
		stats_ptr = (char *)&eth_stats + rte_stats_strings[i].offset;
		val = *(uint64_t *)stats_ptr;
		snprintf(xstats[count].name, sizeof(xstats[count].name),
			"%s", rte_stats_strings[i].name);
		xstats[count++].value = val;
	}

	/* per-rxq stats */
	for (q = 0; q < dev->data->nb_rx_queues; q++) {
		for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
			/* offset selects the per-queue counter array, q
			 * indexes into it -- assumes the arrays are
			 * uint64_t[] (matches the sizeof(uint64_t) stride) */
			stats_ptr = (char *)&eth_stats;
			stats_ptr += rte_rxq_stats_strings[i].offset;
			stats_ptr += q * sizeof(uint64_t);
			val = *(uint64_t *)stats_ptr;
			snprintf(xstats[count].name, sizeof(xstats[count].name),
				"rx_queue_%u_%s", q,
				rte_rxq_stats_strings[i].name);
			xstats[count++].value = val;
		}
	}

	/* per-txq stats */
	for (q = 0; q < dev->data->nb_tx_queues; q++) {
		for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
			stats_ptr = (char *)&eth_stats;
			stats_ptr += rte_txq_stats_strings[i].offset;
			stats_ptr += q * sizeof(uint64_t);
			val = *(uint64_t *)stats_ptr;
			snprintf(xstats[count].name, sizeof(xstats[count].name),
				"tx_queue_%u_%s", q,
				rte_txq_stats_strings[i].name);
			xstats[count++].value = val;
		}
	}

	return count;
}
1478
1479 /* reset ethdev extended statistics */
1480 void
1481 rte_eth_xstats_reset(uint8_t port_id)
1482 {
1483         struct rte_eth_dev *dev;
1484
1485         if (!rte_eth_dev_is_valid_port(port_id)) {
1486                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1487                 return;
1488         }
1489
1490         dev = &rte_eth_devices[port_id];
1491
1492         /* implemented by the driver */
1493         if (dev->dev_ops->xstats_reset != NULL) {
1494                 (*dev->dev_ops->xstats_reset)(dev);
1495                 return;
1496         }
1497
1498         /* fallback to default */
1499         rte_eth_stats_reset(port_id);
1500 }
1501
1502 static int
1503 set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
1504                 uint8_t is_rx)
1505 {
1506         struct rte_eth_dev *dev;
1507
1508         if (!rte_eth_dev_is_valid_port(port_id)) {
1509                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1510                 return -ENODEV;
1511         }
1512
1513         dev = &rte_eth_devices[port_id];
1514
1515         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
1516         return (*dev->dev_ops->queue_stats_mapping_set)
1517                         (dev, queue_id, stat_idx, is_rx);
1518 }
1519
1520
1521 int
1522 rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
1523                 uint8_t stat_idx)
1524 {
1525         return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
1526                         STAT_QMAP_TX);
1527 }
1528
1529
1530 int
1531 rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
1532                 uint8_t stat_idx)
1533 {
1534         return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
1535                         STAT_QMAP_RX);
1536 }
1537
1538
1539 void
1540 rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
1541 {
1542         struct rte_eth_dev *dev;
1543
1544         if (!rte_eth_dev_is_valid_port(port_id)) {
1545                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1546                 return;
1547         }
1548
1549         dev = &rte_eth_devices[port_id];
1550
1551         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
1552
1553         FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
1554         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
1555         dev_info->pci_dev = dev->pci_dev;
1556         if (dev->driver)
1557                 dev_info->driver_name = dev->driver->pci_drv.name;
1558 }
1559
1560 void
1561 rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
1562 {
1563         struct rte_eth_dev *dev;
1564
1565         if (!rte_eth_dev_is_valid_port(port_id)) {
1566                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1567                 return;
1568         }
1569
1570         dev = &rte_eth_devices[port_id];
1571         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
1572 }
1573
1574
1575 int
1576 rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu)
1577 {
1578         struct rte_eth_dev *dev;
1579
1580         if (!rte_eth_dev_is_valid_port(port_id)) {
1581                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1582                 return (-ENODEV);
1583         }
1584
1585         dev = &rte_eth_devices[port_id];
1586         *mtu = dev->data->mtu;
1587         return 0;
1588 }
1589
1590 int
1591 rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu)
1592 {
1593         int ret;
1594         struct rte_eth_dev *dev;
1595
1596         if (!rte_eth_dev_is_valid_port(port_id)) {
1597                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1598                 return (-ENODEV);
1599         }
1600
1601         dev = &rte_eth_devices[port_id];
1602         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
1603
1604         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
1605         if (!ret)
1606                 dev->data->mtu = mtu;
1607
1608         return ret;
1609 }
1610
1611 int
1612 rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
1613 {
1614         struct rte_eth_dev *dev;
1615
1616         if (!rte_eth_dev_is_valid_port(port_id)) {
1617                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1618                 return (-ENODEV);
1619         }
1620
1621         dev = &rte_eth_devices[port_id];
1622         if (! (dev->data->dev_conf.rxmode.hw_vlan_filter)) {
1623                 PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
1624                 return (-ENOSYS);
1625         }
1626
1627         if (vlan_id > 4095) {
1628                 PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
1629                                 port_id, (unsigned) vlan_id);
1630                 return (-EINVAL);
1631         }
1632         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
1633         (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
1634         return (0);
1635 }
1636
1637 int
1638 rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on)
1639 {
1640         struct rte_eth_dev *dev;
1641
1642         if (!rte_eth_dev_is_valid_port(port_id)) {
1643                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1644                 return (-ENODEV);
1645         }
1646
1647         dev = &rte_eth_devices[port_id];
1648         if (rx_queue_id >= dev->data->nb_rx_queues) {
1649                 PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", port_id);
1650                 return (-EINVAL);
1651         }
1652
1653         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
1654         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
1655
1656         return (0);
1657 }
1658
1659 int
1660 rte_eth_dev_set_vlan_ether_type(uint8_t port_id, uint16_t tpid)
1661 {
1662         struct rte_eth_dev *dev;
1663
1664         if (!rte_eth_dev_is_valid_port(port_id)) {
1665                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1666                 return (-ENODEV);
1667         }
1668
1669         dev = &rte_eth_devices[port_id];
1670         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
1671         (*dev->dev_ops->vlan_tpid_set)(dev, tpid);
1672
1673         return (0);
1674 }
1675
1676 int
1677 rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
1678 {
1679         struct rte_eth_dev *dev;
1680         int ret = 0;
1681         int mask = 0;
1682         int cur, org = 0;
1683
1684         if (!rte_eth_dev_is_valid_port(port_id)) {
1685                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1686                 return (-ENODEV);
1687         }
1688
1689         dev = &rte_eth_devices[port_id];
1690
1691         /*check which option changed by application*/
1692         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
1693         org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
1694         if (cur != org){
1695                 dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
1696                 mask |= ETH_VLAN_STRIP_MASK;
1697         }
1698
1699         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
1700         org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
1701         if (cur != org){
1702                 dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
1703                 mask |= ETH_VLAN_FILTER_MASK;
1704         }
1705
1706         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
1707         org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
1708         if (cur != org){
1709                 dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
1710                 mask |= ETH_VLAN_EXTEND_MASK;
1711         }
1712
1713         /*no change*/
1714         if(mask == 0)
1715                 return ret;
1716
1717         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
1718         (*dev->dev_ops->vlan_offload_set)(dev, mask);
1719
1720         return ret;
1721 }
1722
1723 int
1724 rte_eth_dev_get_vlan_offload(uint8_t port_id)
1725 {
1726         struct rte_eth_dev *dev;
1727         int ret = 0;
1728
1729         if (!rte_eth_dev_is_valid_port(port_id)) {
1730                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1731                 return (-ENODEV);
1732         }
1733
1734         dev = &rte_eth_devices[port_id];
1735
1736         if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1737                 ret |= ETH_VLAN_STRIP_OFFLOAD ;
1738
1739         if (dev->data->dev_conf.rxmode.hw_vlan_filter)
1740                 ret |= ETH_VLAN_FILTER_OFFLOAD ;
1741
1742         if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1743                 ret |= ETH_VLAN_EXTEND_OFFLOAD ;
1744
1745         return ret;
1746 }
1747
1748 int
1749 rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on)
1750 {
1751         struct rte_eth_dev *dev;
1752
1753         if (!rte_eth_dev_is_valid_port(port_id)) {
1754                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1755                 return (-ENODEV);
1756         }
1757
1758         dev = &rte_eth_devices[port_id];
1759         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
1760         (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on);
1761
1762         return 0;
1763 }
1764
1765 int
1766 rte_eth_dev_fdir_add_signature_filter(uint8_t port_id,
1767                                       struct rte_fdir_filter *fdir_filter,
1768                                       uint8_t queue)
1769 {
1770         struct rte_eth_dev *dev;
1771
1772         if (!rte_eth_dev_is_valid_port(port_id)) {
1773                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1774                 return (-ENODEV);
1775         }
1776
1777         dev = &rte_eth_devices[port_id];
1778
1779         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
1780                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1781                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1782                 return (-ENOSYS);
1783         }
1784
1785         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1786              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1787             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1788                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1789                                 "None l4type, source & destinations ports " \
1790                                 "should be null!\n");
1791                 return (-EINVAL);
1792         }
1793
1794         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_signature_filter, -ENOTSUP);
1795         return (*dev->dev_ops->fdir_add_signature_filter)(dev, fdir_filter,
1796                                                                 queue);
1797 }
1798
1799 int
1800 rte_eth_dev_fdir_update_signature_filter(uint8_t port_id,
1801                                          struct rte_fdir_filter *fdir_filter,
1802                                          uint8_t queue)
1803 {
1804         struct rte_eth_dev *dev;
1805
1806         if (!rte_eth_dev_is_valid_port(port_id)) {
1807                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1808                 return (-ENODEV);
1809         }
1810
1811         dev = &rte_eth_devices[port_id];
1812
1813         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
1814                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1815                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1816                 return (-ENOSYS);
1817         }
1818
1819         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1820              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1821             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1822                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1823                                 "None l4type, source & destinations ports " \
1824                                 "should be null!\n");
1825                 return (-EINVAL);
1826         }
1827
1828         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_signature_filter, -ENOTSUP);
1829         return (*dev->dev_ops->fdir_update_signature_filter)(dev, fdir_filter,
1830                                                                 queue);
1831
1832 }
1833
1834 int
1835 rte_eth_dev_fdir_remove_signature_filter(uint8_t port_id,
1836                                          struct rte_fdir_filter *fdir_filter)
1837 {
1838         struct rte_eth_dev *dev;
1839
1840         if (!rte_eth_dev_is_valid_port(port_id)) {
1841                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1842                 return (-ENODEV);
1843         }
1844
1845         dev = &rte_eth_devices[port_id];
1846
1847         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
1848                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1849                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1850                 return (-ENOSYS);
1851         }
1852
1853         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1854              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1855             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1856                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1857                                 "None l4type source & destinations ports " \
1858                                 "should be null!\n");
1859                 return (-EINVAL);
1860         }
1861
1862         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_signature_filter, -ENOTSUP);
1863         return (*dev->dev_ops->fdir_remove_signature_filter)(dev, fdir_filter);
1864 }
1865
1866 int
1867 rte_eth_dev_fdir_get_infos(uint8_t port_id, struct rte_eth_fdir *fdir)
1868 {
1869         struct rte_eth_dev *dev;
1870
1871         if (!rte_eth_dev_is_valid_port(port_id)) {
1872                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1873                 return (-ENODEV);
1874         }
1875
1876         dev = &rte_eth_devices[port_id];
1877         if (! (dev->data->dev_conf.fdir_conf.mode)) {
1878                 PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
1879                 return (-ENOSYS);
1880         }
1881
1882         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_infos_get, -ENOTSUP);
1883
1884         (*dev->dev_ops->fdir_infos_get)(dev, fdir);
1885         return (0);
1886 }
1887
1888 int
1889 rte_eth_dev_fdir_add_perfect_filter(uint8_t port_id,
1890                                     struct rte_fdir_filter *fdir_filter,
1891                                     uint16_t soft_id, uint8_t queue,
1892                                     uint8_t drop)
1893 {
1894         struct rte_eth_dev *dev;
1895
1896         if (!rte_eth_dev_is_valid_port(port_id)) {
1897                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1898                 return (-ENODEV);
1899         }
1900
1901         dev = &rte_eth_devices[port_id];
1902
1903         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
1904                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1905                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1906                 return (-ENOSYS);
1907         }
1908
1909         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1910              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1911             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1912                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1913                                 "None l4type, source & destinations ports " \
1914                                 "should be null!\n");
1915                 return (-EINVAL);
1916         }
1917
1918         /* For now IPv6 is not supported with perfect filter */
1919         if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
1920                 return (-ENOTSUP);
1921
1922         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_perfect_filter, -ENOTSUP);
1923         return (*dev->dev_ops->fdir_add_perfect_filter)(dev, fdir_filter,
1924                                                                 soft_id, queue,
1925                                                                 drop);
1926 }
1927
1928 int
1929 rte_eth_dev_fdir_update_perfect_filter(uint8_t port_id,
1930                                        struct rte_fdir_filter *fdir_filter,
1931                                        uint16_t soft_id, uint8_t queue,
1932                                        uint8_t drop)
1933 {
1934         struct rte_eth_dev *dev;
1935
1936         if (!rte_eth_dev_is_valid_port(port_id)) {
1937                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1938                 return (-ENODEV);
1939         }
1940
1941         dev = &rte_eth_devices[port_id];
1942
1943         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
1944                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1945                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1946                 return (-ENOSYS);
1947         }
1948
1949         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1950              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1951             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1952                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1953                                 "None l4type, source & destinations ports " \
1954                                 "should be null!\n");
1955                 return (-EINVAL);
1956         }
1957
1958         /* For now IPv6 is not supported with perfect filter */
1959         if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
1960                 return (-ENOTSUP);
1961
1962         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_perfect_filter, -ENOTSUP);
1963         return (*dev->dev_ops->fdir_update_perfect_filter)(dev, fdir_filter,
1964                                                         soft_id, queue, drop);
1965 }
1966
1967 int
1968 rte_eth_dev_fdir_remove_perfect_filter(uint8_t port_id,
1969                                        struct rte_fdir_filter *fdir_filter,
1970                                        uint16_t soft_id)
1971 {
1972         struct rte_eth_dev *dev;
1973
1974         if (!rte_eth_dev_is_valid_port(port_id)) {
1975                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1976                 return (-ENODEV);
1977         }
1978
1979         dev = &rte_eth_devices[port_id];
1980
1981         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
1982                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1983                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1984                 return (-ENOSYS);
1985         }
1986
1987         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1988              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1989             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1990                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1991                                 "None l4type, source & destinations ports " \
1992                                 "should be null!\n");
1993                 return (-EINVAL);
1994         }
1995
1996         /* For now IPv6 is not supported with perfect filter */
1997         if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
1998                 return (-ENOTSUP);
1999
2000         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_perfect_filter, -ENOTSUP);
2001         return (*dev->dev_ops->fdir_remove_perfect_filter)(dev, fdir_filter,
2002                                                                 soft_id);
2003 }
2004
2005 int
2006 rte_eth_dev_fdir_set_masks(uint8_t port_id, struct rte_fdir_masks *fdir_mask)
2007 {
2008         struct rte_eth_dev *dev;
2009
2010         if (!rte_eth_dev_is_valid_port(port_id)) {
2011                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2012                 return (-ENODEV);
2013         }
2014
2015         dev = &rte_eth_devices[port_id];
2016         if (! (dev->data->dev_conf.fdir_conf.mode)) {
2017                 PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
2018                 return (-ENOSYS);
2019         }
2020
2021         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_set_masks, -ENOTSUP);
2022         return (*dev->dev_ops->fdir_set_masks)(dev, fdir_mask);
2023 }
2024
2025 int
2026 rte_eth_dev_flow_ctrl_get(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
2027 {
2028         struct rte_eth_dev *dev;
2029
2030         if (!rte_eth_dev_is_valid_port(port_id)) {
2031                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2032                 return (-ENODEV);
2033         }
2034
2035         dev = &rte_eth_devices[port_id];
2036         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
2037         memset(fc_conf, 0, sizeof(*fc_conf));
2038         return (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf);
2039 }
2040
2041 int
2042 rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
2043 {
2044         struct rte_eth_dev *dev;
2045
2046         if (!rte_eth_dev_is_valid_port(port_id)) {
2047                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2048                 return (-ENODEV);
2049         }
2050
2051         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
2052                 PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
2053                 return (-EINVAL);
2054         }
2055
2056         dev = &rte_eth_devices[port_id];
2057         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
2058         return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
2059 }
2060
2061 int
2062 rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc_conf)
2063 {
2064         struct rte_eth_dev *dev;
2065
2066         if (!rte_eth_dev_is_valid_port(port_id)) {
2067                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2068                 return (-ENODEV);
2069         }
2070
2071         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
2072                 PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
2073                 return (-EINVAL);
2074         }
2075
2076         dev = &rte_eth_devices[port_id];
2077         /* High water, low water validation are device specific */
2078         if  (*dev->dev_ops->priority_flow_ctrl_set)
2079                 return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
2080         return (-ENOTSUP);
2081 }
2082
2083 static inline int
2084 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
2085                         uint16_t reta_size)
2086 {
2087         uint16_t i, num;
2088
2089         if (!reta_conf)
2090                 return -EINVAL;
2091
2092         if (reta_size != RTE_ALIGN(reta_size, RTE_RETA_GROUP_SIZE)) {
2093                 PMD_DEBUG_TRACE("Invalid reta size, should be %u aligned\n",
2094                                                         RTE_RETA_GROUP_SIZE);
2095                 return -EINVAL;
2096         }
2097
2098         num = reta_size / RTE_RETA_GROUP_SIZE;
2099         for (i = 0; i < num; i++) {
2100                 if (reta_conf[i].mask)
2101                         return 0;
2102         }
2103
2104         return -EINVAL;
2105 }
2106
2107 static inline int
2108 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
2109                          uint16_t reta_size,
2110                          uint8_t max_rxq)
2111 {
2112         uint16_t i, idx, shift;
2113
2114         if (!reta_conf)
2115                 return -EINVAL;
2116
2117         if (max_rxq == 0) {
2118                 PMD_DEBUG_TRACE("No receive queue is available\n");
2119                 return -EINVAL;
2120         }
2121
2122         for (i = 0; i < reta_size; i++) {
2123                 idx = i / RTE_RETA_GROUP_SIZE;
2124                 shift = i % RTE_RETA_GROUP_SIZE;
2125                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
2126                         (reta_conf[idx].reta[shift] >= max_rxq)) {
2127                         PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
2128                                 "the maximum rxq index: %u\n", idx, shift,
2129                                 reta_conf[idx].reta[shift], max_rxq);
2130                         return -EINVAL;
2131                 }
2132         }
2133
2134         return 0;
2135 }
2136
2137 int
2138 rte_eth_dev_rss_reta_update(uint8_t port_id,
2139                             struct rte_eth_rss_reta_entry64 *reta_conf,
2140                             uint16_t reta_size)
2141 {
2142         struct rte_eth_dev *dev;
2143         int ret;
2144
2145         if (!rte_eth_dev_is_valid_port(port_id)) {
2146                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2147                 return -ENODEV;
2148         }
2149
2150         /* Check mask bits */
2151         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2152         if (ret < 0)
2153                 return ret;
2154
2155         dev = &rte_eth_devices[port_id];
2156
2157         /* Check entry value */
2158         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
2159                                 dev->data->nb_rx_queues);
2160         if (ret < 0)
2161                 return ret;
2162
2163         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
2164         return (*dev->dev_ops->reta_update)(dev, reta_conf, reta_size);
2165 }
2166
2167 int
2168 rte_eth_dev_rss_reta_query(uint8_t port_id,
2169                            struct rte_eth_rss_reta_entry64 *reta_conf,
2170                            uint16_t reta_size)
2171 {
2172         struct rte_eth_dev *dev;
2173         int ret;
2174
2175         if (port_id >= nb_ports) {
2176                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2177                 return -ENODEV;
2178         }
2179
2180         /* Check mask bits */
2181         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2182         if (ret < 0)
2183                 return ret;
2184
2185         dev = &rte_eth_devices[port_id];
2186         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
2187         return (*dev->dev_ops->reta_query)(dev, reta_conf, reta_size);
2188 }
2189
2190 int
2191 rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf)
2192 {
2193         struct rte_eth_dev *dev;
2194         uint16_t rss_hash_protos;
2195
2196         if (!rte_eth_dev_is_valid_port(port_id)) {
2197                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2198                 return (-ENODEV);
2199         }
2200
2201         rss_hash_protos = rss_conf->rss_hf;
2202         if ((rss_hash_protos != 0) &&
2203             ((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) {
2204                 PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%x\n",
2205                                 rss_hash_protos);
2206                 return (-EINVAL);
2207         }
2208         dev = &rte_eth_devices[port_id];
2209         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
2210         return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
2211 }
2212
2213 int
2214 rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
2215                               struct rte_eth_rss_conf *rss_conf)
2216 {
2217         struct rte_eth_dev *dev;
2218
2219         if (!rte_eth_dev_is_valid_port(port_id)) {
2220                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2221                 return (-ENODEV);
2222         }
2223
2224         dev = &rte_eth_devices[port_id];
2225         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
2226         return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
2227 }
2228
2229 int
2230 rte_eth_dev_udp_tunnel_add(uint8_t port_id,
2231                            struct rte_eth_udp_tunnel *udp_tunnel)
2232 {
2233         struct rte_eth_dev *dev;
2234
2235         if (!rte_eth_dev_is_valid_port(port_id)) {
2236                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2237                 return -ENODEV;
2238         }
2239
2240         if (udp_tunnel == NULL) {
2241                 PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2242                 return -EINVAL;
2243         }
2244
2245         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2246                 PMD_DEBUG_TRACE("Invalid tunnel type\n");
2247                 return -EINVAL;
2248         }
2249
2250         dev = &rte_eth_devices[port_id];
2251         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_add, -ENOTSUP);
2252         return (*dev->dev_ops->udp_tunnel_add)(dev, udp_tunnel);
2253 }
2254
2255 int
2256 rte_eth_dev_udp_tunnel_delete(uint8_t port_id,
2257                               struct rte_eth_udp_tunnel *udp_tunnel)
2258 {
2259         struct rte_eth_dev *dev;
2260
2261         if (!rte_eth_dev_is_valid_port(port_id)) {
2262                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2263                 return -ENODEV;
2264         }
2265
2266         dev = &rte_eth_devices[port_id];
2267
2268         if (udp_tunnel == NULL) {
2269                 PMD_DEBUG_TRACE("Invalid udp_tunnel parametr\n");
2270                 return -EINVAL;
2271         }
2272
2273         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2274                 PMD_DEBUG_TRACE("Invalid tunnel type\n");
2275                 return -EINVAL;
2276         }
2277
2278         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_del, -ENOTSUP);
2279         return (*dev->dev_ops->udp_tunnel_del)(dev, udp_tunnel);
2280 }
2281
2282 int
2283 rte_eth_led_on(uint8_t port_id)
2284 {
2285         struct rte_eth_dev *dev;
2286
2287         if (!rte_eth_dev_is_valid_port(port_id)) {
2288                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2289                 return (-ENODEV);
2290         }
2291
2292         dev = &rte_eth_devices[port_id];
2293         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
2294         return ((*dev->dev_ops->dev_led_on)(dev));
2295 }
2296
2297 int
2298 rte_eth_led_off(uint8_t port_id)
2299 {
2300         struct rte_eth_dev *dev;
2301
2302         if (!rte_eth_dev_is_valid_port(port_id)) {
2303                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2304                 return (-ENODEV);
2305         }
2306
2307         dev = &rte_eth_devices[port_id];
2308         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
2309         return ((*dev->dev_ops->dev_led_off)(dev));
2310 }
2311
2312 /*
2313  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2314  * an empty spot.
2315  */
2316 static inline int
2317 get_mac_addr_index(uint8_t port_id, struct ether_addr *addr)
2318 {
2319         struct rte_eth_dev_info dev_info;
2320         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2321         unsigned i;
2322
2323         rte_eth_dev_info_get(port_id, &dev_info);
2324
2325         for (i = 0; i < dev_info.max_mac_addrs; i++)
2326                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
2327                         return i;
2328
2329         return -1;
2330 }
2331
/* All-zero Ethernet address used to mark free slots in the MAC tables. */
2332 static struct ether_addr null_mac_addr = {{0, 0, 0, 0, 0, 0}};
2333
2334 int
2335 rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
2336                         uint32_t pool)
2337 {
2338         struct rte_eth_dev *dev;
2339         int index;
2340         uint64_t pool_mask;
2341
2342         if (!rte_eth_dev_is_valid_port(port_id)) {
2343                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2344                 return (-ENODEV);
2345         }
2346
2347         dev = &rte_eth_devices[port_id];
2348         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
2349
2350         if (is_zero_ether_addr(addr)) {
2351                 PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2352                         port_id);
2353                 return (-EINVAL);
2354         }
2355         if (pool >= ETH_64_POOLS) {
2356                 PMD_DEBUG_TRACE("pool id must be 0-%d\n",ETH_64_POOLS - 1);
2357                 return (-EINVAL);
2358         }
2359
2360         index = get_mac_addr_index(port_id, addr);
2361         if (index < 0) {
2362                 index = get_mac_addr_index(port_id, &null_mac_addr);
2363                 if (index < 0) {
2364                         PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2365                                 port_id);
2366                         return (-ENOSPC);
2367                 }
2368         } else {
2369                 pool_mask = dev->data->mac_pool_sel[index];
2370
2371                 /* Check if both MAC address and pool is alread there, and do nothing */
2372                 if (pool_mask & (1ULL << pool))
2373                         return 0;
2374         }
2375
2376         /* Update NIC */
2377         (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
2378
2379         /* Update address in NIC data structure */
2380         ether_addr_copy(addr, &dev->data->mac_addrs[index]);
2381
2382         /* Update pool bitmap in NIC data structure */
2383         dev->data->mac_pool_sel[index] |= (1ULL << pool);
2384
2385         return 0;
2386 }
2387
2388 int
2389 rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
2390 {
2391         struct rte_eth_dev *dev;
2392         int index;
2393
2394         if (!rte_eth_dev_is_valid_port(port_id)) {
2395                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2396                 return (-ENODEV);
2397         }
2398
2399         dev = &rte_eth_devices[port_id];
2400         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
2401
2402         index = get_mac_addr_index(port_id, addr);
2403         if (index == 0) {
2404                 PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
2405                 return (-EADDRINUSE);
2406         } else if (index < 0)
2407                 return 0;  /* Do nothing if address wasn't found */
2408
2409         /* Update NIC */
2410         (*dev->dev_ops->mac_addr_remove)(dev, index);
2411
2412         /* Update address in NIC data structure */
2413         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
2414
2415         /* reset pool bitmap */
2416         dev->data->mac_pool_sel[index] = 0;
2417
2418         return 0;
2419 }
2420
2421 int
2422 rte_eth_dev_set_vf_rxmode(uint8_t port_id,  uint16_t vf,
2423                                 uint16_t rx_mode, uint8_t on)
2424 {
2425         uint16_t num_vfs;
2426         struct rte_eth_dev *dev;
2427         struct rte_eth_dev_info dev_info;
2428
2429         if (!rte_eth_dev_is_valid_port(port_id)) {
2430                 PMD_DEBUG_TRACE("set VF RX mode:Invalid port_id=%d\n",
2431                                 port_id);
2432                 return (-ENODEV);
2433         }
2434
2435         dev = &rte_eth_devices[port_id];
2436         rte_eth_dev_info_get(port_id, &dev_info);
2437
2438         num_vfs = dev_info.max_vfs;
2439         if (vf > num_vfs)
2440         {
2441                 PMD_DEBUG_TRACE("set VF RX mode:invalid VF id %d\n", vf);
2442                 return (-EINVAL);
2443         }
2444         if (rx_mode == 0)
2445         {
2446                 PMD_DEBUG_TRACE("set VF RX mode:mode mask ca not be zero\n");
2447                 return (-EINVAL);
2448         }
2449         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP);
2450         return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on);
2451 }
2452
2453 /*
2454  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2455  * an empty spot.
2456  */
2457 static inline int
2458 get_hash_mac_addr_index(uint8_t port_id, struct ether_addr *addr)
2459 {
2460         struct rte_eth_dev_info dev_info;
2461         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2462         unsigned i;
2463
2464         rte_eth_dev_info_get(port_id, &dev_info);
2465         if (!dev->data->hash_mac_addrs)
2466                 return -1;
2467
2468         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
2469                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
2470                         ETHER_ADDR_LEN) == 0)
2471                         return i;
2472
2473         return -1;
2474 }
2475
2476 int
2477 rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
2478                                 uint8_t on)
2479 {
2480         int index;
2481         int ret;
2482         struct rte_eth_dev *dev;
2483
2484         if (!rte_eth_dev_is_valid_port(port_id)) {
2485                 PMD_DEBUG_TRACE("unicast hash setting:Invalid port_id=%d\n",
2486                         port_id);
2487                 return (-ENODEV);
2488         }
2489
2490         dev = &rte_eth_devices[port_id];
2491         if (is_zero_ether_addr(addr)) {
2492                 PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2493                         port_id);
2494                 return (-EINVAL);
2495         }
2496
2497         index = get_hash_mac_addr_index(port_id, addr);
2498         /* Check if it's already there, and do nothing */
2499         if ((index >= 0) && (on))
2500                 return 0;
2501
2502         if (index < 0) {
2503                 if (!on) {
2504                         PMD_DEBUG_TRACE("port %d: the MAC address was not"
2505                                 "set in UTA\n", port_id);
2506                         return (-EINVAL);
2507                 }
2508
2509                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
2510                 if (index < 0) {
2511                         PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2512                                         port_id);
2513                         return (-ENOSPC);
2514                 }
2515         }
2516
2517         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
2518         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
2519         if (ret == 0) {
2520                 /* Update address in NIC data structure */
2521                 if (on)
2522                         ether_addr_copy(addr,
2523                                         &dev->data->hash_mac_addrs[index]);
2524                 else
2525                         ether_addr_copy(&null_mac_addr,
2526                                         &dev->data->hash_mac_addrs[index]);
2527         }
2528
2529         return ret;
2530 }
2531
2532 int
2533 rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
2534 {
2535         struct rte_eth_dev *dev;
2536
2537         if (!rte_eth_dev_is_valid_port(port_id)) {
2538                 PMD_DEBUG_TRACE("unicast hash setting:Invalid port_id=%d\n",
2539                         port_id);
2540                 return (-ENODEV);
2541         }
2542
2543         dev = &rte_eth_devices[port_id];
2544
2545         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
2546         return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
2547 }
2548
2549 int
2550 rte_eth_dev_set_vf_rx(uint8_t port_id,uint16_t vf, uint8_t on)
2551 {
2552         uint16_t num_vfs;
2553         struct rte_eth_dev *dev;
2554         struct rte_eth_dev_info dev_info;
2555
2556         if (!rte_eth_dev_is_valid_port(port_id)) {
2557                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2558                 return (-ENODEV);
2559         }
2560
2561         dev = &rte_eth_devices[port_id];
2562         rte_eth_dev_info_get(port_id, &dev_info);
2563
2564         num_vfs = dev_info.max_vfs;
2565         if (vf > num_vfs)
2566         {
2567                 PMD_DEBUG_TRACE("port %d: invalid vf id\n", port_id);
2568                 return (-EINVAL);
2569         }
2570
2571         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP);
2572         return (*dev->dev_ops->set_vf_rx)(dev, vf,on);
2573 }
2574
2575 int
2576 rte_eth_dev_set_vf_tx(uint8_t port_id,uint16_t vf, uint8_t on)
2577 {
2578         uint16_t num_vfs;
2579         struct rte_eth_dev *dev;
2580         struct rte_eth_dev_info dev_info;
2581
2582         if (!rte_eth_dev_is_valid_port(port_id)) {
2583                 PMD_DEBUG_TRACE("set pool tx:Invalid port_id=%d\n", port_id);
2584                 return (-ENODEV);
2585         }
2586
2587         dev = &rte_eth_devices[port_id];
2588         rte_eth_dev_info_get(port_id, &dev_info);
2589
2590         num_vfs = dev_info.max_vfs;
2591         if (vf > num_vfs)
2592         {
2593                 PMD_DEBUG_TRACE("set pool tx:invalid pool id=%d\n", vf);
2594                 return (-EINVAL);
2595         }
2596
2597         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP);
2598         return (*dev->dev_ops->set_vf_tx)(dev, vf,on);
2599 }
2600
2601 int
2602 rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id,
2603                                  uint64_t vf_mask,uint8_t vlan_on)
2604 {
2605         struct rte_eth_dev *dev;
2606
2607         if (!rte_eth_dev_is_valid_port(port_id)) {
2608                 PMD_DEBUG_TRACE("VF VLAN filter:invalid port id=%d\n",
2609                                 port_id);
2610                 return (-ENODEV);
2611         }
2612         dev = &rte_eth_devices[port_id];
2613
2614         if(vlan_id > ETHER_MAX_VLAN_ID)
2615         {
2616                 PMD_DEBUG_TRACE("VF VLAN filter:invalid VLAN id=%d\n",
2617                         vlan_id);
2618                 return (-EINVAL);
2619         }
2620         if (vf_mask == 0)
2621         {
2622                 PMD_DEBUG_TRACE("VF VLAN filter:pool_mask can not be 0\n");
2623                 return (-EINVAL);
2624         }
2625
2626         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP);
2627         return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id,
2628                                                 vf_mask,vlan_on);
2629 }
2630
2631 int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
2632                                         uint16_t tx_rate)
2633 {
2634         struct rte_eth_dev *dev;
2635         struct rte_eth_dev_info dev_info;
2636         struct rte_eth_link link;
2637
2638         if (!rte_eth_dev_is_valid_port(port_id)) {
2639                 PMD_DEBUG_TRACE("set queue rate limit:invalid port id=%d\n",
2640                                 port_id);
2641                 return -ENODEV;
2642         }
2643
2644         dev = &rte_eth_devices[port_id];
2645         rte_eth_dev_info_get(port_id, &dev_info);
2646         link = dev->data->dev_link;
2647
2648         if (queue_idx > dev_info.max_tx_queues) {
2649                 PMD_DEBUG_TRACE("set queue rate limit:port %d: "
2650                                 "invalid queue id=%d\n", port_id, queue_idx);
2651                 return -EINVAL;
2652         }
2653
2654         if (tx_rate > link.link_speed) {
2655                 PMD_DEBUG_TRACE("set queue rate limit:invalid tx_rate=%d, "
2656                                 "bigger than link speed= %d\n",
2657                         tx_rate, link.link_speed);
2658                 return -EINVAL;
2659         }
2660
2661         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
2662         return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate);
2663 }
2664
2665 int rte_eth_set_vf_rate_limit(uint8_t port_id, uint16_t vf, uint16_t tx_rate,
2666                                 uint64_t q_msk)
2667 {
2668         struct rte_eth_dev *dev;
2669         struct rte_eth_dev_info dev_info;
2670         struct rte_eth_link link;
2671
2672         if (q_msk == 0)
2673                 return 0;
2674
2675         if (!rte_eth_dev_is_valid_port(port_id)) {
2676                 PMD_DEBUG_TRACE("set VF rate limit:invalid port id=%d\n",
2677                                 port_id);
2678                 return -ENODEV;
2679         }
2680
2681         dev = &rte_eth_devices[port_id];
2682         rte_eth_dev_info_get(port_id, &dev_info);
2683         link = dev->data->dev_link;
2684
2685         if (vf > dev_info.max_vfs) {
2686                 PMD_DEBUG_TRACE("set VF rate limit:port %d: "
2687                                 "invalid vf id=%d\n", port_id, vf);
2688                 return -EINVAL;
2689         }
2690
2691         if (tx_rate > link.link_speed) {
2692                 PMD_DEBUG_TRACE("set VF rate limit:invalid tx_rate=%d, "
2693                                 "bigger than link speed= %d\n",
2694                                 tx_rate, link.link_speed);
2695                 return -EINVAL;
2696         }
2697
2698         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rate_limit, -ENOTSUP);
2699         return (*dev->dev_ops->set_vf_rate_limit)(dev, vf, tx_rate, q_msk);
2700 }
2701
2702 int
2703 rte_eth_mirror_rule_set(uint8_t port_id,
2704                         struct rte_eth_vmdq_mirror_conf *mirror_conf,
2705                         uint8_t rule_id, uint8_t on)
2706 {
2707         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2708
2709         if (!rte_eth_dev_is_valid_port(port_id)) {
2710                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2711                 return (-ENODEV);
2712         }
2713
2714         if (mirror_conf->rule_type_mask == 0) {
2715                 PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
2716                 return (-EINVAL);
2717         }
2718
2719         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
2720                 PMD_DEBUG_TRACE("Invalid dst pool, pool id must"
2721                         "be 0-%d\n",ETH_64_POOLS - 1);
2722                 return (-EINVAL);
2723         }
2724
2725         if ((mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) &&
2726                 (mirror_conf->pool_mask == 0)) {
2727                 PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not"
2728                                 "be 0.\n");
2729                 return (-EINVAL);
2730         }
2731
2732         if(rule_id >= ETH_VMDQ_NUM_MIRROR_RULE)
2733         {
2734                 PMD_DEBUG_TRACE("Invalid rule_id, rule_id must be 0-%d\n",
2735                         ETH_VMDQ_NUM_MIRROR_RULE - 1);
2736                 return (-EINVAL);
2737         }
2738
2739         dev = &rte_eth_devices[port_id];
2740         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
2741
2742         return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
2743 }
2744
2745 int
2746 rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
2747 {
2748         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2749
2750         if (!rte_eth_dev_is_valid_port(port_id)) {
2751                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2752                 return (-ENODEV);
2753         }
2754
2755         if(rule_id >= ETH_VMDQ_NUM_MIRROR_RULE)
2756         {
2757                 PMD_DEBUG_TRACE("Invalid rule_id, rule_id must be 0-%d\n",
2758                         ETH_VMDQ_NUM_MIRROR_RULE-1);
2759                 return (-EINVAL);
2760         }
2761
2762         dev = &rte_eth_devices[port_id];
2763         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
2764
2765         return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
2766 }
2767
2768 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
/*
 * Debug-build (RTE_LIBRTE_ETHDEV_DEBUG) variant of rte_eth_rx_burst():
 * validates port, RX-burst function pointer and queue id before
 * dispatching to the driver's receive routine. Returns the number of
 * packets received; 0 also doubles as the error indication since the
 * return type carries no error code.
 */
uint16_t
rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
		 struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct rte_eth_dev *dev;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return 0;
	}

	dev = &rte_eth_devices[port_id];
	/* Driver may not have installed an RX burst function yet. */
	FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);
	if (queue_id >= dev->data->nb_rx_queues) {
		PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
		return 0;
	}
	return (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
						rx_pkts, nb_pkts);
}
2789
/*
 * Debug-build (RTE_LIBRTE_ETHDEV_DEBUG) variant of rte_eth_tx_burst():
 * validates port, TX-burst function pointer and queue id before
 * dispatching to the driver's transmit routine. Returns the number of
 * packets actually sent; 0 also doubles as the error indication.
 */
uint16_t
rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
		 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct rte_eth_dev *dev;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return 0;
	}

	dev = &rte_eth_devices[port_id];

	/* Driver may not have installed a TX burst function yet. */
	FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);
	if (queue_id >= dev->data->nb_tx_queues) {
		PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
		return 0;
	}
	return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id],
						tx_pkts, nb_pkts);
}
2811
2812 uint32_t
2813 rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
2814 {
2815         struct rte_eth_dev *dev;
2816
2817         if (!rte_eth_dev_is_valid_port(port_id)) {
2818                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2819                 return 0;
2820         }
2821
2822         dev = &rte_eth_devices[port_id];
2823         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, 0);
2824         return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
2825 }
2826
2827 int
2828 rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
2829 {
2830         struct rte_eth_dev *dev;
2831
2832         if (!rte_eth_dev_is_valid_port(port_id)) {
2833                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2834                 return (-ENODEV);
2835         }
2836
2837         dev = &rte_eth_devices[port_id];
2838         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
2839         return (*dev->dev_ops->rx_descriptor_done)( \
2840                 dev->data->rx_queues[queue_id], offset);
2841 }
2842 #endif
2843
/*
 * Register a user callback for an ethdev event on a port. Registering
 * the same (cb_fn, cb_arg, event) triple twice is a no-op. Returns 0 on
 * success, -EINVAL on bad arguments, -ENOMEM if allocation fails.
 */
int
rte_eth_dev_callback_register(uint8_t port_id,
			enum rte_eth_event_type event,
			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_callback *user_cb;

	if (!cb_fn)
		return (-EINVAL);

	if (!rte_eth_dev_is_valid_port(port_id)) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-EINVAL);
	}

	dev = &rte_eth_devices[port_id];
	rte_spinlock_lock(&rte_eth_dev_cb_lock);

	/* Look for an existing registration of the same triple; if found,
	 * user_cb is non-NULL after the loop and nothing is allocated. */
	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
		if (user_cb->cb_fn == cb_fn &&
			user_cb->cb_arg == cb_arg &&
			user_cb->event == event) {
			break;
		}
	}

	/* create a new callback. */
	if (user_cb == NULL && (user_cb = rte_zmalloc("INTR_USER_CALLBACK",
			sizeof(struct rte_eth_dev_callback), 0)) != NULL) {
		user_cb->cb_fn = cb_fn;
		user_cb->cb_arg = cb_arg;
		user_cb->event = event;
		TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
	}

	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
	/* user_cb is NULL only if the rte_zmalloc() above failed. */
	return ((user_cb == NULL) ? -ENOMEM : 0);
}
2883
/*
 * Unregister callbacks matching (cb_fn, event) on a port. Passing
 * cb_arg == (void *)-1 acts as a wildcard that matches any cb_arg.
 * A callback that is currently executing cannot be removed; in that
 * case -EAGAIN is returned and the caller should retry later.
 */
int
rte_eth_dev_callback_unregister(uint8_t port_id,
			enum rte_eth_event_type event,
			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_callback *cb, *next;

	if (!cb_fn)
		return (-EINVAL);

	if (!rte_eth_dev_is_valid_port(port_id)) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-EINVAL);
	}

	dev = &rte_eth_devices[port_id];
	rte_spinlock_lock(&rte_eth_dev_cb_lock);

	ret = 0;
	/* 'next' is fetched before a possible TAILQ_REMOVE of 'cb'. */
	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb->cb_arg != (void *)-1 &&
				cb->cb_arg != cb_arg))
			continue;

		/*
		 * if this callback is not executing right now,
		 * then remove it.
		 */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
			rte_free(cb);
		} else {
			ret = -EAGAIN;
		}
	}

	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
	return (ret);
}
2929
/*
 * Invoke all registered callbacks for 'event' on a device. Each entry
 * is copied and the list lock is dropped while the user callback runs,
 * so callbacks may themselves call ethdev APIs; the 'active' flag lets
 * rte_eth_dev_callback_unregister() detect an in-flight callback.
 */
void
_rte_eth_dev_callback_process(struct rte_eth_dev *dev,
	enum rte_eth_event_type event)
{
	struct rte_eth_dev_callback *cb_lst;
	struct rte_eth_dev_callback dev_cb;

	rte_spinlock_lock(&rte_eth_dev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		/* Copy the entry so it can be used after unlocking. */
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		rte_spinlock_unlock(&rte_eth_dev_cb_lock);
		dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
						dev_cb.cb_arg);
		rte_spinlock_lock(&rte_eth_dev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
}
2951 #ifdef RTE_NIC_BYPASS
2952 int rte_eth_dev_bypass_init(uint8_t port_id)
2953 {
2954         struct rte_eth_dev *dev;
2955
2956         if (!rte_eth_dev_is_valid_port(port_id)) {
2957                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2958                 return (-ENODEV);
2959         }
2960
2961         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2962                 PMD_DEBUG_TRACE("Invalid port device\n");
2963                 return (-ENODEV);
2964         }
2965
2966         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP);
2967         (*dev->dev_ops->bypass_init)(dev);
2968         return 0;
2969 }
2970
2971 int
2972 rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state)
2973 {
2974         struct rte_eth_dev *dev;
2975
2976         if (!rte_eth_dev_is_valid_port(port_id)) {
2977                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2978                 return (-ENODEV);
2979         }
2980
2981         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2982                 PMD_DEBUG_TRACE("Invalid port device\n");
2983                 return (-ENODEV);
2984         }
2985         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
2986         (*dev->dev_ops->bypass_state_show)(dev, state);
2987         return 0;
2988 }
2989
2990 int
2991 rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state)
2992 {
2993         struct rte_eth_dev *dev;
2994
2995         if (!rte_eth_dev_is_valid_port(port_id)) {
2996                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2997                 return (-ENODEV);
2998         }
2999
3000         if ((dev= &rte_eth_devices[port_id]) == NULL) {
3001                 PMD_DEBUG_TRACE("Invalid port device\n");
3002                 return (-ENODEV);
3003         }
3004
3005         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP);
3006         (*dev->dev_ops->bypass_state_set)(dev, new_state);
3007         return 0;
3008 }
3009
3010 int
3011 rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state)
3012 {
3013         struct rte_eth_dev *dev;
3014
3015         if (!rte_eth_dev_is_valid_port(port_id)) {
3016                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3017                 return (-ENODEV);
3018         }
3019
3020         if ((dev= &rte_eth_devices[port_id]) == NULL) {
3021                 PMD_DEBUG_TRACE("Invalid port device\n");
3022                 return (-ENODEV);
3023         }
3024
3025         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
3026         (*dev->dev_ops->bypass_event_show)(dev, event, state);
3027         return 0;
3028 }
3029
3030 int
3031 rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state)
3032 {
3033         struct rte_eth_dev *dev;
3034
3035         if (!rte_eth_dev_is_valid_port(port_id)) {
3036                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3037                 return (-ENODEV);
3038         }
3039
3040         if ((dev= &rte_eth_devices[port_id]) == NULL) {
3041                 PMD_DEBUG_TRACE("Invalid port device\n");
3042                 return (-ENODEV);
3043         }
3044
3045         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP);
3046         (*dev->dev_ops->bypass_event_set)(dev, event, state);
3047         return 0;
3048 }
3049
3050 int
3051 rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout)
3052 {
3053         struct rte_eth_dev *dev;
3054
3055         if (!rte_eth_dev_is_valid_port(port_id)) {
3056                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3057                 return (-ENODEV);
3058         }
3059
3060         if ((dev= &rte_eth_devices[port_id]) == NULL) {
3061                 PMD_DEBUG_TRACE("Invalid port device\n");
3062                 return (-ENODEV);
3063         }
3064
3065         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP);
3066         (*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout);
3067         return 0;
3068 }
3069
3070 int
3071 rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver)
3072 {
3073         struct rte_eth_dev *dev;
3074
3075         if (!rte_eth_dev_is_valid_port(port_id)) {
3076                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3077                 return (-ENODEV);
3078         }
3079
3080         if ((dev= &rte_eth_devices[port_id]) == NULL) {
3081                 PMD_DEBUG_TRACE("Invalid port device\n");
3082                 return (-ENODEV);
3083         }
3084
3085         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP);
3086         (*dev->dev_ops->bypass_ver_show)(dev, ver);
3087         return 0;
3088 }
3089
3090 int
3091 rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
3092 {
3093         struct rte_eth_dev *dev;
3094
3095         if (!rte_eth_dev_is_valid_port(port_id)) {
3096                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3097                 return (-ENODEV);
3098         }
3099
3100         if ((dev= &rte_eth_devices[port_id]) == NULL) {
3101                 PMD_DEBUG_TRACE("Invalid port device\n");
3102                 return (-ENODEV);
3103         }
3104
3105         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP);
3106         (*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout);
3107         return 0;
3108 }
3109
3110 int
3111 rte_eth_dev_bypass_wd_reset(uint8_t port_id)
3112 {
3113         struct rte_eth_dev *dev;
3114
3115         if (!rte_eth_dev_is_valid_port(port_id)) {
3116                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3117                 return (-ENODEV);
3118         }
3119
3120         if ((dev= &rte_eth_devices[port_id]) == NULL) {
3121                 PMD_DEBUG_TRACE("Invalid port device\n");
3122                 return (-ENODEV);
3123         }
3124
3125         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP);
3126         (*dev->dev_ops->bypass_wd_reset)(dev);
3127         return 0;
3128 }
3129 #endif
3130
3131 int
3132 rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type)
3133 {
3134         struct rte_eth_dev *dev;
3135
3136         if (!rte_eth_dev_is_valid_port(port_id)) {
3137                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3138                 return -ENODEV;
3139         }
3140
3141         dev = &rte_eth_devices[port_id];
3142         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3143         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3144                                 RTE_ETH_FILTER_NOP, NULL);
3145 }
3146
3147 int
3148 rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
3149                        enum rte_filter_op filter_op, void *arg)
3150 {
3151         struct rte_eth_dev *dev;
3152
3153         if (!rte_eth_dev_is_valid_port(port_id)) {
3154                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3155                 return -ENODEV;
3156         }
3157
3158         dev = &rte_eth_devices[port_id];
3159         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3160         return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op, arg);
3161 }
3162
3163 void *
3164 rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
3165                 rte_rxtx_callback_fn fn, void *user_param)
3166 {
3167 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3168         rte_errno = ENOTSUP;
3169         return NULL;
3170 #endif
3171         /* check input parameters */
3172         if (port_id >= nb_ports || fn == NULL ||
3173                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3174                 rte_errno = EINVAL;
3175                 return NULL;
3176         }
3177
3178         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3179
3180         if (cb == NULL) {
3181                 rte_errno = ENOMEM;
3182                 return NULL;
3183         }
3184
3185         cb->fn = fn;
3186         cb->param = user_param;
3187         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3188         rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3189         return cb;
3190 }
3191
3192 void *
3193 rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
3194                 rte_rxtx_callback_fn fn, void *user_param)
3195 {
3196 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3197         rte_errno = ENOTSUP;
3198         return NULL;
3199 #endif
3200         /* check input parameters */
3201         if (port_id >= nb_ports || fn == NULL ||
3202                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
3203                 rte_errno = EINVAL;
3204                 return NULL;
3205         }
3206
3207         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3208
3209         if (cb == NULL) {
3210                 rte_errno = ENOMEM;
3211                 return NULL;
3212         }
3213
3214         cb->fn = fn;
3215         cb->param = user_param;
3216         cb->next = rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
3217         rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
3218         return cb;
3219 }
3220
3221 int
3222 rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
3223                 struct rte_eth_rxtx_callback *user_cb)
3224 {
3225 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3226         return (-ENOTSUP);
3227 #endif
3228         /* Check input parameters. */
3229         if (port_id >= nb_ports || user_cb == NULL ||
3230                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3231                 return (-EINVAL);
3232         }
3233
3234         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3235         struct rte_eth_rxtx_callback *cb = dev->post_rx_burst_cbs[queue_id];
3236         struct rte_eth_rxtx_callback *prev_cb;
3237
3238         /* Reset head pointer and remove user cb if first in the list. */
3239         if (cb == user_cb) {
3240                 dev->post_rx_burst_cbs[queue_id] = user_cb->next;
3241                 return 0;
3242         }
3243
3244         /* Remove the user cb from the callback list. */
3245         do {
3246                 prev_cb = cb;
3247                 cb = cb->next;
3248
3249                 if (cb == user_cb) {
3250                         prev_cb->next = user_cb->next;
3251                         return 0;
3252                 }
3253
3254         } while (cb != NULL);
3255
3256         /* Callback wasn't found. */
3257         return (-EINVAL);
3258 }
3259
3260 int
3261 rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
3262                 struct rte_eth_rxtx_callback *user_cb)
3263 {
3264 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3265         return (-ENOTSUP);
3266 #endif
3267         /* Check input parameters. */
3268         if (port_id >= nb_ports || user_cb == NULL ||
3269                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
3270                 return (-EINVAL);
3271         }
3272
3273         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3274         struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];
3275         struct rte_eth_rxtx_callback *prev_cb;
3276
3277         /* Reset head pointer and remove user cb if first in the list. */
3278         if (cb == user_cb) {
3279                 dev->pre_tx_burst_cbs[queue_id] = user_cb->next;
3280                 return 0;
3281         }
3282
3283         /* Remove the user cb from the callback list. */
3284         do {
3285                 prev_cb = cb;
3286                 cb = cb->next;
3287
3288                 if (cb == user_cb) {
3289                         prev_cb->next = user_cb->next;
3290                         return 0;
3291                 }
3292
3293         } while (cb != NULL);
3294
3295         /* Callback wasn't found. */
3296         return (-EINVAL);
3297 }