ethdev: release port
[dpdk.git] / lib / librte_ether / rte_ethdev.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/types.h>
35 #include <sys/queue.h>
36 #include <ctype.h>
37 #include <stdio.h>
38 #include <stdlib.h>
39 #include <string.h>
40 #include <stdarg.h>
41 #include <errno.h>
42 #include <stdint.h>
43 #include <inttypes.h>
44 #include <netinet/in.h>
45
46 #include <rte_byteorder.h>
47 #include <rte_log.h>
48 #include <rte_debug.h>
49 #include <rte_interrupts.h>
50 #include <rte_pci.h>
51 #include <rte_memory.h>
52 #include <rte_memcpy.h>
53 #include <rte_memzone.h>
54 #include <rte_launch.h>
55 #include <rte_tailq.h>
56 #include <rte_eal.h>
57 #include <rte_per_lcore.h>
58 #include <rte_lcore.h>
59 #include <rte_atomic.h>
60 #include <rte_branch_prediction.h>
61 #include <rte_common.h>
62 #include <rte_ring.h>
63 #include <rte_mempool.h>
64 #include <rte_malloc.h>
65 #include <rte_mbuf.h>
66 #include <rte_errno.h>
67 #include <rte_spinlock.h>
68 #include <rte_string_fns.h>
69
70 #include "rte_ether.h"
71 #include "rte_ethdev.h"
72
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
/* Debug trace: prefixes the message with the calling function's name. */
#define PMD_DEBUG_TRACE(fmt, args...) do {                        \
                RTE_LOG(ERR, PMD, "%s: " fmt, __func__, ## args); \
        } while (0)
#else
/* Compiled out entirely unless RTE_LIBRTE_ETHDEV_DEBUG is defined. */
#define PMD_DEBUG_TRACE(fmt, args...)
#endif

/* Macros for restricting functions to the primary process only */
#define PROC_PRIMARY_OR_ERR_RET(retval) do { \
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
                PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
                return (retval); \
        } \
} while(0)
#define PROC_PRIMARY_OR_RET() do { \
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
                PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
                return; \
        } \
} while(0)

/* Macros to check for invalid function pointers in dev_ops structure */
#define FUNC_PTR_OR_ERR_RET(func, retval) do { \
        if ((func) == NULL) { \
                PMD_DEBUG_TRACE("Function not supported\n"); \
                return (retval); \
        } \
} while(0)
#define FUNC_PTR_OR_RET(func) do { \
        if ((func) == NULL) { \
                PMD_DEBUG_TRACE("Function not supported\n"); \
                return; \
        } \
} while(0)
108
/* Name of the memzone holding per-port data shared across processes. */
static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
/* Per-process table of Ethernet devices, indexed by port id. */
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
/* Shared array of port data; mapped lazily by rte_eth_dev_data_alloc(). */
static struct rte_eth_dev_data *rte_eth_dev_data = NULL;
/* Count of ports currently in the DEV_ATTACHED state. */
static uint8_t nb_ports = 0;

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and its offset in stats structure  */
struct rte_eth_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];    /* exported statistic name */
        unsigned offset;        /* byte offset within struct rte_eth_stats */
};

/* Per-port generic statistics exposed through the xstats API. */
static struct rte_eth_xstats_name_off rte_stats_strings[] = {
         {"rx_packets", offsetof(struct rte_eth_stats, ipackets)},
         {"tx_packets", offsetof(struct rte_eth_stats, opackets)},
         {"rx_bytes", offsetof(struct rte_eth_stats, ibytes)},
         {"tx_bytes", offsetof(struct rte_eth_stats, obytes)},
         {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
         {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
         {"rx_crc_errors", offsetof(struct rte_eth_stats, ibadcrc)},
         {"rx_bad_length_errors", offsetof(struct rte_eth_stats, ibadlen)},
         {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
         {"alloc_rx_buff_failed", offsetof(struct rte_eth_stats, rx_nombuf)},
         {"fdir_match", offsetof(struct rte_eth_stats, fdirmatch)},
         {"fdir_miss", offsetof(struct rte_eth_stats, fdirmiss)},
         {"tx_flow_control_xon", offsetof(struct rte_eth_stats, tx_pause_xon)},
         {"rx_flow_control_xon", offsetof(struct rte_eth_stats, rx_pause_xon)},
         {"tx_flow_control_xoff", offsetof(struct rte_eth_stats, tx_pause_xoff)},
         {"rx_flow_control_xoff", offsetof(struct rte_eth_stats, rx_pause_xoff)},
};
#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))

/* Per-RX-queue statistics; offsets index the q_* arrays in rte_eth_stats. */
static struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
        {"rx_packets", offsetof(struct rte_eth_stats, q_ipackets)},
        {"rx_bytes", offsetof(struct rte_eth_stats, q_ibytes)},
};
#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
                sizeof(rte_rxq_stats_strings[0]))

/* Per-TX-queue statistics; offsets index the q_* arrays in rte_eth_stats. */
static struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
        {"tx_packets", offsetof(struct rte_eth_stats, q_opackets)},
        {"tx_bytes", offsetof(struct rte_eth_stats, q_obytes)},
        {"tx_errors", offsetof(struct rte_eth_stats, q_errors)},
};
#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
                sizeof(rte_txq_stats_strings[0]))
157
158
/**
 * The user application callback description.
 *
 * It contains callback address to be registered by user application,
 * the pointer to the parameters for callback, and the event type.
 */
struct rte_eth_dev_callback {
        TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
        rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
        void *cb_arg;                           /**< Parameter for callback */
        enum rte_eth_event_type event;          /**< Interrupt event type */
        uint32_t active;                        /**< Callback is executing */
};

/* Selector distinguishing TX vs RX per-queue statistics mapping. */
enum {
        STAT_QMAP_TX = 0,
        STAT_QMAP_RX
};

/* Attachment state kept in rte_eth_dev.attached. */
enum {
        DEV_DETACHED = 0,
        DEV_ATTACHED
};
182
/*
 * Map the memzone holding the shared rte_eth_dev_data array.
 * The primary process reserves (and zeroes) the zone; secondary
 * processes look up the existing one. Panics on failure, since no
 * port can function without this shared data.
 */
static inline void
rte_eth_dev_data_alloc(void)
{
        const unsigned flags = 0;
        const struct rte_memzone *mz;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY){
                /* Primary owns the allocation. */
                mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
                                RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
                                rte_socket_id(), flags);
        } else
                /* Secondary processes attach to the primary's zone. */
                mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
        if (mz == NULL)
                rte_panic("Cannot allocate memzone for ethernet port data\n");

        rte_eth_dev_data = mz->addr;
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                /* Fresh zone: clear all per-port data before use. */
                memset(rte_eth_dev_data, 0,
                                RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
}
203
204 static struct rte_eth_dev *
205 rte_eth_dev_allocated(const char *name)
206 {
207         unsigned i;
208
209         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
210                 if ((rte_eth_devices[i].attached == DEV_ATTACHED) &&
211                     strcmp(rte_eth_devices[i].data->name, name) == 0)
212                         return &rte_eth_devices[i];
213         }
214         return NULL;
215 }
216
217 static uint8_t
218 rte_eth_dev_find_free_port(void)
219 {
220         unsigned i;
221
222         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
223                 if (rte_eth_devices[i].attached == DEV_DETACHED)
224                         return i;
225         }
226         return RTE_MAX_ETHPORTS;
227 }
228
229 struct rte_eth_dev *
230 rte_eth_dev_allocate(const char *name)
231 {
232         uint8_t port_id;
233         struct rte_eth_dev *eth_dev;
234
235         port_id = rte_eth_dev_find_free_port();
236         if (port_id == RTE_MAX_ETHPORTS) {
237                 PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
238                 return NULL;
239         }
240
241         if (rte_eth_dev_data == NULL)
242                 rte_eth_dev_data_alloc();
243
244         if (rte_eth_dev_allocated(name) != NULL) {
245                 PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n", name);
246                 return NULL;
247         }
248
249         eth_dev = &rte_eth_devices[port_id];
250         eth_dev->data = &rte_eth_dev_data[port_id];
251         snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
252         eth_dev->data->port_id = port_id;
253         eth_dev->attached = DEV_ATTACHED;
254         nb_ports++;
255         return eth_dev;
256 }
257
258 int
259 rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
260 {
261         if (eth_dev == NULL)
262                 return -EINVAL;
263
264         eth_dev->attached = 0;
265         nb_ports--;
266         return 0;
267 }
268
/*
 * PCI probe hook installed by rte_eth_driver_register(): creates an
 * ethdev for the matched PCI device, allocates its private data (in
 * the primary process only), and invokes the PMD's init function.
 * On PMD init failure the private data is freed and the slot released.
 *
 * Returns 0 on success, -ENOMEM if no ethdev slot is available, or
 * the PMD's (non-zero) error code.
 */
static int
rte_eth_dev_init(struct rte_pci_driver *pci_drv,
                 struct rte_pci_device *pci_dev)
{
        struct eth_driver    *eth_drv;
        struct rte_eth_dev *eth_dev;
        char ethdev_name[RTE_ETH_NAME_MAX_LEN];

        int diag;

        /* The eth_driver embeds the pci_driver as its first member. */
        eth_drv = (struct eth_driver *)pci_drv;

        /* Create unique Ethernet device name using PCI address */
        snprintf(ethdev_name, RTE_ETH_NAME_MAX_LEN, "%d:%d.%d",
                        pci_dev->addr.bus, pci_dev->addr.devid, pci_dev->addr.function);

        eth_dev = rte_eth_dev_allocate(ethdev_name);
        if (eth_dev == NULL)
                return -ENOMEM;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY){
                /* Only the primary process allocates the PMD's private data;
                 * secondaries reuse it through the shared dev->data. */
                eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
                                  eth_drv->dev_private_size,
                                  RTE_CACHE_LINE_SIZE);
                if (eth_dev->data->dev_private == NULL)
                        rte_panic("Cannot allocate memzone for private port data\n");
        }
        eth_dev->pci_dev = pci_dev;
        eth_dev->driver = eth_drv;
        eth_dev->data->rx_mbuf_alloc_failed = 0;

        /* init user callbacks */
        TAILQ_INIT(&(eth_dev->link_intr_cbs));

        /*
         * Set the default MTU.
         */
        eth_dev->data->mtu = ETHER_MTU;

        /* Invoke PMD device initialization function */
        diag = (*eth_drv->eth_dev_init)(eth_drv, eth_dev);
        if (diag == 0)
                return (0);

        /* PMD init failed: log, undo the allocation and free the slot. */
        PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%u device_id=0x%x)"
                        " failed\n", pci_drv->name,
                        (unsigned) pci_dev->id.vendor_id,
                        (unsigned) pci_dev->id.device_id);
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                rte_free(eth_dev->data->dev_private);
        eth_dev->attached = DEV_DETACHED;
        nb_ports--;
        return diag;
}
323
/**
 * Register an Ethernet [Poll Mode] driver.
 *
 * Function invoked by the initialization function of an Ethernet driver
 * to simultaneously register itself as a PCI driver and as an Ethernet
 * Poll Mode Driver.
 * Invokes the rte_eal_pci_register() function to register the *pci_drv*
 * structure embedded in the *eth_drv* structure, after having stored the
 * address of the rte_eth_dev_init() function in the *devinit* field of
 * the *pci_drv* structure.
 * During the PCI probing phase, the rte_eth_dev_init() function is
 * invoked for each PCI [Ethernet device] matching the embedded PCI
 * identifiers provided by the driver.
 */
void
rte_eth_driver_register(struct eth_driver *eth_drv)
{
        /* Hook our probe function, then hand the driver to the PCI layer. */
        eth_drv->pci_drv.devinit = rte_eth_dev_init;
        rte_eal_pci_register(&eth_drv->pci_drv);
}
344
345 static int
346 rte_eth_dev_is_valid_port(uint8_t port_id)
347 {
348         if (port_id >= RTE_MAX_ETHPORTS ||
349             rte_eth_devices[port_id].attached != DEV_ATTACHED)
350                 return 0;
351         else
352                 return 1;
353 }
354
355 int
356 rte_eth_dev_socket_id(uint8_t port_id)
357 {
358         if (!rte_eth_dev_is_valid_port(port_id))
359                 return -1;
360         return rte_eth_devices[port_id].pci_dev->numa_node;
361 }
362
363 uint8_t
364 rte_eth_dev_count(void)
365 {
366         return (nb_ports);
367 }
368
369 static int
370 rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
371 {
372         uint16_t old_nb_queues = dev->data->nb_rx_queues;
373         void **rxq;
374         unsigned i;
375
376         if (dev->data->rx_queues == NULL) { /* first time configuration */
377                 dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
378                                 sizeof(dev->data->rx_queues[0]) * nb_queues,
379                                 RTE_CACHE_LINE_SIZE);
380                 if (dev->data->rx_queues == NULL) {
381                         dev->data->nb_rx_queues = 0;
382                         return -(ENOMEM);
383                 }
384 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
385                 dev->post_rx_burst_cbs = rte_zmalloc(
386                         "ethdev->post_rx_burst_cbs",
387                         sizeof(*dev->post_rx_burst_cbs) * nb_queues,
388                         RTE_CACHE_LINE_SIZE);
389                 if (dev->post_rx_burst_cbs == NULL) {
390                         rte_free(dev->data->rx_queues);
391                         dev->data->rx_queues = NULL;
392                         dev->data->nb_rx_queues = 0;
393                         return -ENOMEM;
394                 }
395 #endif
396
397         } else { /* re-configure */
398                 FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
399
400                 rxq = dev->data->rx_queues;
401
402                 for (i = nb_queues; i < old_nb_queues; i++)
403                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
404                 rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
405                                 RTE_CACHE_LINE_SIZE);
406                 if (rxq == NULL)
407                         return -(ENOMEM);
408 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
409                 dev->post_rx_burst_cbs = rte_realloc(
410                         dev->post_rx_burst_cbs,
411                         sizeof(*dev->post_rx_burst_cbs) *
412                                 nb_queues, RTE_CACHE_LINE_SIZE);
413                 if (dev->post_rx_burst_cbs == NULL)
414                         return -ENOMEM;
415 #endif
416                 if (nb_queues > old_nb_queues) {
417                         uint16_t new_qs = nb_queues - old_nb_queues;
418                         memset(rxq + old_nb_queues, 0,
419                                 sizeof(rxq[0]) * new_qs);
420 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
421                         memset(dev->post_rx_burst_cbs + old_nb_queues, 0,
422                                 sizeof(dev->post_rx_burst_cbs[0]) * new_qs);
423 #endif
424                 }
425
426                 dev->data->rx_queues = rxq;
427
428         }
429         dev->data->nb_rx_queues = nb_queues;
430         return (0);
431 }
432
433 int
434 rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
435 {
436         struct rte_eth_dev *dev;
437
438         /* This function is only safe when called from the primary process
439          * in a multi-process setup*/
440         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
441
442         if (!rte_eth_dev_is_valid_port(port_id)) {
443                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
444                 return -EINVAL;
445         }
446
447         dev = &rte_eth_devices[port_id];
448         if (rx_queue_id >= dev->data->nb_rx_queues) {
449                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
450                 return -EINVAL;
451         }
452
453         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
454
455         return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
456
457 }
458
459 int
460 rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
461 {
462         struct rte_eth_dev *dev;
463
464         /* This function is only safe when called from the primary process
465          * in a multi-process setup*/
466         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
467
468         if (!rte_eth_dev_is_valid_port(port_id)) {
469                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
470                 return -EINVAL;
471         }
472
473         dev = &rte_eth_devices[port_id];
474         if (rx_queue_id >= dev->data->nb_rx_queues) {
475                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
476                 return -EINVAL;
477         }
478
479         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
480
481         return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
482
483 }
484
485 int
486 rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
487 {
488         struct rte_eth_dev *dev;
489
490         /* This function is only safe when called from the primary process
491          * in a multi-process setup*/
492         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
493
494         if (!rte_eth_dev_is_valid_port(port_id)) {
495                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
496                 return -EINVAL;
497         }
498
499         dev = &rte_eth_devices[port_id];
500         if (tx_queue_id >= dev->data->nb_tx_queues) {
501                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
502                 return -EINVAL;
503         }
504
505         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
506
507         return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
508
509 }
510
511 int
512 rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
513 {
514         struct rte_eth_dev *dev;
515
516         /* This function is only safe when called from the primary process
517          * in a multi-process setup*/
518         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
519
520         if (!rte_eth_dev_is_valid_port(port_id)) {
521                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
522                 return -EINVAL;
523         }
524
525         dev = &rte_eth_devices[port_id];
526         if (tx_queue_id >= dev->data->nb_tx_queues) {
527                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
528                 return -EINVAL;
529         }
530
531         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
532
533         return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
534
535 }
536
537 static int
538 rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
539 {
540         uint16_t old_nb_queues = dev->data->nb_tx_queues;
541         void **txq;
542         unsigned i;
543
544         if (dev->data->tx_queues == NULL) { /* first time configuration */
545                 dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
546                                 sizeof(dev->data->tx_queues[0]) * nb_queues,
547                                 RTE_CACHE_LINE_SIZE);
548                 if (dev->data->tx_queues == NULL) {
549                         dev->data->nb_tx_queues = 0;
550                         return -(ENOMEM);
551                 }
552 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
553                 dev->pre_tx_burst_cbs = rte_zmalloc(
554                         "ethdev->pre_tx_burst_cbs",
555                         sizeof(*dev->pre_tx_burst_cbs) * nb_queues,
556                         RTE_CACHE_LINE_SIZE);
557                 if (dev->pre_tx_burst_cbs == NULL) {
558                         rte_free(dev->data->tx_queues);
559                         dev->data->tx_queues = NULL;
560                         dev->data->nb_tx_queues = 0;
561                         return -ENOMEM;
562                 }
563 #endif
564
565         } else { /* re-configure */
566                 FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
567
568                 txq = dev->data->tx_queues;
569
570                 for (i = nb_queues; i < old_nb_queues; i++)
571                         (*dev->dev_ops->tx_queue_release)(txq[i]);
572                 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
573                                 RTE_CACHE_LINE_SIZE);
574                 if (txq == NULL)
575                         return -ENOMEM;
576 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
577                 dev->pre_tx_burst_cbs = rte_realloc(
578                         dev->pre_tx_burst_cbs,
579                         sizeof(*dev->pre_tx_burst_cbs) *
580                                 nb_queues, RTE_CACHE_LINE_SIZE);
581                 if (dev->pre_tx_burst_cbs == NULL)
582                         return -ENOMEM;
583 #endif
584                 if (nb_queues > old_nb_queues) {
585                         uint16_t new_qs = nb_queues - old_nb_queues;
586                         memset(txq + old_nb_queues, 0,
587                                 sizeof(txq[0]) * new_qs);
588 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
589                         memset(dev->pre_tx_burst_cbs + old_nb_queues, 0,
590                                 sizeof(dev->pre_tx_burst_cbs[0]) * new_qs);
591 #endif
592                 }
593
594                 dev->data->tx_queues = txq;
595
596         }
597         dev->data->nb_tx_queues = nb_queues;
598         return (0);
599 }
600
601 static int
602 rte_eth_dev_check_vf_rss_rxq_num(uint8_t port_id, uint16_t nb_rx_q)
603 {
604         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
605         switch (nb_rx_q) {
606         case 1:
607         case 2:
608                 RTE_ETH_DEV_SRIOV(dev).active =
609                         ETH_64_POOLS;
610                 break;
611         case 4:
612                 RTE_ETH_DEV_SRIOV(dev).active =
613                         ETH_32_POOLS;
614                 break;
615         default:
616                 return -EINVAL;
617         }
618
619         RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q;
620         RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
621                 dev->pci_dev->max_vfs * nb_rx_q;
622
623         return 0;
624 }
625
/*
 * Validate the requested multi-queue configuration against the device's
 * SRIOV state and queue limits, adjusting dev->data->dev_conf mq modes
 * to supported defaults where the spec allows it.
 * Returns 0 when acceptable, -EINVAL otherwise.
 */
static int
rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                      const struct rte_eth_conf *dev_conf)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];

        if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
                /* check multi-queue mode */
                if ((dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) ||
                    (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB_RSS) ||
                    (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB)) {
                        /* SRIOV only works in VMDq enable mode */
                        PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
                                        " SRIOV active, "
                                        "wrong VMDQ mq_mode rx %u tx %u\n",
                                        port_id,
                                        dev_conf->rxmode.mq_mode,
                                        dev_conf->txmode.mq_mode);
                        return (-EINVAL);
                }

                switch (dev_conf->rxmode.mq_mode) {
                case ETH_MQ_RX_VMDQ_DCB:
                case ETH_MQ_RX_VMDQ_DCB_RSS:
                        /* DCB/RSS VMDQ in SRIOV mode, not implement yet */
                        PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
                                        " SRIOV active, "
                                        "unsupported VMDQ mq_mode rx %u\n",
                                        port_id, dev_conf->rxmode.mq_mode);
                        return (-EINVAL);
                case ETH_MQ_RX_RSS:
                        PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
                                        " SRIOV active, "
                                        "Rx mq mode is changed from:"
                                        "mq_mode %u into VMDQ mq_mode %u\n",
                                        port_id,
                                        dev_conf->rxmode.mq_mode,
                                        dev->data->dev_conf.rxmode.mq_mode);
                        /* fallthrough: plain RSS is promoted to VMDQ+RSS */
                case ETH_MQ_RX_VMDQ_RSS:
                        dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
                        if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
                                if (rte_eth_dev_check_vf_rss_rxq_num(port_id, nb_rx_q) != 0) {
                                        PMD_DEBUG_TRACE("ethdev port_id=%d"
                                                " SRIOV active, invalid queue"
                                                " number for VMDQ RSS, allowed"
                                                " value are 1, 2 or 4\n",
                                                port_id);
                                        return -EINVAL;
                                }
                        break;
                default: /* ETH_MQ_RX_VMDQ_ONLY or ETH_MQ_RX_NONE */
                        /* if nothing mq mode configure, use default scheme */
                        dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
                        if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
                                RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
                        break;
                }

                switch (dev_conf->txmode.mq_mode) {
                case ETH_MQ_TX_VMDQ_DCB:
                        /* DCB VMDQ in SRIOV mode, not implement yet */
                        PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
                                        " SRIOV active, "
                                        "unsupported VMDQ mq_mode tx %u\n",
                                        port_id, dev_conf->txmode.mq_mode);
                        return (-EINVAL);
                default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
                        /* if nothing mq mode configure, use default scheme */
                        dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
                        break;
                }

                /* check valid queue number */
                if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
                    (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
                        PMD_DEBUG_TRACE("ethdev port_id=%d SRIOV active, "
                                    "queue number must less equal to %d\n",
                                        port_id, RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
                        return (-EINVAL);
                }
        } else {
                /* For vmdb+dcb mode check our configuration before we go further */
                if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
                        const struct rte_eth_vmdq_dcb_conf *conf;

                        /* VMDQ+DCB requires the fixed RX queue count. */
                        if (nb_rx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_rx_q "
                                                "!= %d\n",
                                                port_id, ETH_VMDQ_DCB_NUM_QUEUES);
                                return (-EINVAL);
                        }
                        conf = &(dev_conf->rx_adv_conf.vmdq_dcb_conf);
                        if (! (conf->nb_queue_pools == ETH_16_POOLS ||
                               conf->nb_queue_pools == ETH_32_POOLS)) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
                                                "nb_queue_pools must be %d or %d\n",
                                                port_id, ETH_16_POOLS, ETH_32_POOLS);
                                return (-EINVAL);
                        }
                }
                if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
                        const struct rte_eth_vmdq_dcb_tx_conf *conf;

                        /* Same fixed queue count constraint on the TX side. */
                        if (nb_tx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_tx_q "
                                                "!= %d\n",
                                                port_id, ETH_VMDQ_DCB_NUM_QUEUES);
                                return (-EINVAL);
                        }
                        conf = &(dev_conf->tx_adv_conf.vmdq_dcb_tx_conf);
                        if (! (conf->nb_queue_pools == ETH_16_POOLS ||
                               conf->nb_queue_pools == ETH_32_POOLS)) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
                                                "nb_queue_pools != %d or nb_queue_pools "
                                                "!= %d\n",
                                                port_id, ETH_16_POOLS, ETH_32_POOLS);
                                return (-EINVAL);
                        }
                }

                /* For DCB mode check our configuration before we go further */
                if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
                        const struct rte_eth_dcb_rx_conf *conf;

                        if (nb_rx_q != ETH_DCB_NUM_QUEUES) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_rx_q "
                                                "!= %d\n",
                                                port_id, ETH_DCB_NUM_QUEUES);
                                return (-EINVAL);
                        }
                        conf = &(dev_conf->rx_adv_conf.dcb_rx_conf);
                        /* DCB supports only 4 or 8 traffic classes. */
                        if (! (conf->nb_tcs == ETH_4_TCS ||
                               conf->nb_tcs == ETH_8_TCS)) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
                                                "nb_tcs != %d or nb_tcs "
                                                "!= %d\n",
                                                port_id, ETH_4_TCS, ETH_8_TCS);
                                return (-EINVAL);
                        }
                }

                if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
                        const struct rte_eth_dcb_tx_conf *conf;

                        if (nb_tx_q != ETH_DCB_NUM_QUEUES) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_tx_q "
                                                "!= %d\n",
                                                port_id, ETH_DCB_NUM_QUEUES);
                                return (-EINVAL);
                        }
                        conf = &(dev_conf->tx_adv_conf.dcb_tx_conf);
                        /* DCB supports only 4 or 8 traffic classes. */
                        if (! (conf->nb_tcs == ETH_4_TCS ||
                               conf->nb_tcs == ETH_8_TCS)) {
                                PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
                                                "nb_tcs != %d or nb_tcs "
                                                "!= %d\n",
                                                port_id, ETH_4_TCS, ETH_8_TCS);
                                return (-EINVAL);
                        }
                }
        }
        return 0;
}
789
/*
 * Configure an Ethernet device: validate the requested RX/TX queue
 * counts against the device capabilities, store the configuration,
 * allocate the queue arrays and invoke the driver's dev_configure
 * callback.  The port must be stopped.
 * Returns 0 on success, or a negative errno-style value on error.
 */
int
rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	int diag;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup*/
	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

	if (!rte_eth_dev_is_valid_port(port_id)) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-EINVAL);
	}

	dev = &rte_eth_devices[port_id];

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	/* Reconfiguring a running port is not allowed. */
	if (dev->data->dev_started) {
		PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n", port_id);
		return (-EBUSY);
	}

	/*
	 * Check that the numbers of RX and TX queues are not greater
	 * than the maximum number of RX and TX queues supported by the
	 * configured device.  Zero queues in either direction is also
	 * rejected.
	 */
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);
	if (nb_rx_q > dev_info.max_rx_queues) {
		PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
				port_id, nb_rx_q, dev_info.max_rx_queues);
		return (-EINVAL);
	}
	if (nb_rx_q == 0) {
		PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_q == 0\n", port_id);
		return (-EINVAL);
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
				port_id, nb_tx_q, dev_info.max_tx_queues);
		return (-EINVAL);
	}
	if (nb_tx_q == 0) {
		PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_q == 0\n", port_id);
		return (-EINVAL);
	}

	/* Copy the dev_conf parameter into the dev structure.
	 * NOTE(review): this copy happens before the remaining validation
	 * below, so a failed configure leaves the rejected configuration
	 * stored in dev->data — confirm callers retry with a valid config. */
	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

	/*
	 * If link state interrupt is enabled, check that the
	 * device supports it.
	 */
	if (dev_conf->intr_conf.lsc == 1) {
		const struct rte_pci_driver *pci_drv = &dev->driver->pci_drv;

		if (!(pci_drv->drv_flags & RTE_PCI_DRV_INTR_LSC)) {
			PMD_DEBUG_TRACE("driver %s does not support lsc\n",
					pci_drv->name);
			return (-EINVAL);
		}
	}

	/*
	 * If jumbo frames are enabled, check that the maximum RX packet
	 * length is supported by the configured device.
	 */
	if (dev_conf->rxmode.jumbo_frame == 1) {
		if (dev_conf->rxmode.max_rx_pkt_len >
		    dev_info.max_rx_pktlen) {
			PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
				" > max valid value %u\n",
				port_id,
				(unsigned)dev_conf->rxmode.max_rx_pkt_len,
				(unsigned)dev_info.max_rx_pktlen);
			return (-EINVAL);
		}
		else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
			PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
				" < min valid value %u\n",
				port_id,
				(unsigned)dev_conf->rxmode.max_rx_pkt_len,
				(unsigned)ETHER_MIN_LEN);
			return (-EINVAL);
		}
	} else {
		/* Without jumbo frames, silently clamp an out-of-range
		 * max_rx_pkt_len to the standard Ethernet maximum. */
		if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
			dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
			/* Use default value */
			dev->data->dev_conf.rxmode.max_rx_pkt_len =
							ETHER_MAX_LEN;
	}

	/* multiple queue mode checking */
	diag = rte_eth_dev_check_mq_mode(port_id, nb_rx_q, nb_tx_q, dev_conf);
	if (diag != 0) {
		PMD_DEBUG_TRACE("port%d rte_eth_dev_check_mq_mode = %d\n",
				port_id, diag);
		return diag;
	}

	/*
	 * Setup new number of RX/TX queues and reconfigure device.
	 */
	diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
				port_id, diag);
		return diag;
	}

	diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
				port_id, diag);
		/* undo the RX queue allocation so the port ends up
		 * unconfigured rather than half-configured */
		rte_eth_dev_rx_queue_config(dev, 0);
		return diag;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
				port_id, diag);
		/* roll back both queue arrays on driver failure */
		rte_eth_dev_rx_queue_config(dev, 0);
		rte_eth_dev_tx_queue_config(dev, 0);
		return diag;
	}

	return 0;
}
928
/*
 * Re-apply the software-tracked configuration (MAC addresses,
 * promiscuous and all-multicast modes) to the hardware.  Called
 * after a device start so the hardware state matches dev->data.
 */
static void
rte_eth_dev_config_restore(uint8_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct ether_addr addr;
	uint16_t i;
	uint32_t pool = 0;

	dev = &rte_eth_devices[port_id];

	rte_eth_dev_info_get(port_id, &dev_info);

	/* with SR-IOV active, replay addresses into the default VMDq
	 * pool instead of pool 0 */
	if (RTE_ETH_DEV_SRIOV(dev).active)
		pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx;

	/* replay MAC address configuration */
	for (i = 0; i < dev_info.max_mac_addrs; i++) {
		addr = dev->data->mac_addrs[i];

		/* skip zero address */
		if (is_zero_ether_addr(&addr))
			continue;

		/* add address to the hardware, but only if this entry was
		 * assigned to the selected pool */
		if  (*dev->dev_ops->mac_addr_add &&
			(dev->data->mac_pool_sel[i] & (1ULL << pool)))
			(*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool);
		else {
			PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
					port_id);
			/* exit the loop but not return an error */
			break;
		}
	}

	/* replay promiscuous configuration (get == -1 means invalid port:
	 * then neither branch runs) */
	if (rte_eth_promiscuous_get(port_id) == 1)
		rte_eth_promiscuous_enable(port_id);
	else if (rte_eth_promiscuous_get(port_id) == 0)
		rte_eth_promiscuous_disable(port_id);

	/* replay allmulticast configuration */
	if (rte_eth_allmulticast_get(port_id) == 1)
		rte_eth_allmulticast_enable(port_id);
	else if (rte_eth_allmulticast_get(port_id) == 0)
		rte_eth_allmulticast_disable(port_id);
}
977
/*
 * Start a configured Ethernet device: call the driver's dev_start,
 * mark the port started, replay the stored configuration and, when
 * link state interrupts are enabled, prime the link status once.
 * Returns 0 on success (including when already started) or the
 * negative value reported by the driver.
 */
int
rte_eth_dev_start(uint8_t port_id)
{
	struct rte_eth_dev *dev;
	int diag;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup*/
	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

	if (!rte_eth_dev_is_valid_port(port_id)) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-EINVAL);
	}

	dev = &rte_eth_devices[port_id];

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	/* starting twice is not an error: trace and report success */
	if (dev->data->dev_started != 0) {
		PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
			" already started\n",
			port_id);
		return (0);
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	/* re-apply MAC addresses and promiscuous/allmulticast modes */
	rte_eth_dev_config_restore(port_id);

	/* with link state interrupts enabled, read the link once
	 * (wait_to_complete = 0) so dev->data->dev_link starts valid */
	if (dev->data->dev_conf.intr_conf.lsc != 0) {
		FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
		(*dev->dev_ops->link_update)(dev, 0);
	}
	return 0;
}
1018
1019 void
1020 rte_eth_dev_stop(uint8_t port_id)
1021 {
1022         struct rte_eth_dev *dev;
1023
1024         /* This function is only safe when called from the primary process
1025          * in a multi-process setup*/
1026         PROC_PRIMARY_OR_RET();
1027
1028         if (!rte_eth_dev_is_valid_port(port_id)) {
1029                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1030                 return;
1031         }
1032
1033         dev = &rte_eth_devices[port_id];
1034
1035         FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1036
1037         if (dev->data->dev_started == 0) {
1038                 PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
1039                         " already stopped\n",
1040                         port_id);
1041                 return;
1042         }
1043
1044         dev->data->dev_started = 0;
1045         (*dev->dev_ops->dev_stop)(dev);
1046 }
1047
1048 int
1049 rte_eth_dev_set_link_up(uint8_t port_id)
1050 {
1051         struct rte_eth_dev *dev;
1052
1053         /* This function is only safe when called from the primary process
1054          * in a multi-process setup*/
1055         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
1056
1057         if (!rte_eth_dev_is_valid_port(port_id)) {
1058                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1059                 return -EINVAL;
1060         }
1061
1062         dev = &rte_eth_devices[port_id];
1063
1064         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1065         return (*dev->dev_ops->dev_set_link_up)(dev);
1066 }
1067
1068 int
1069 rte_eth_dev_set_link_down(uint8_t port_id)
1070 {
1071         struct rte_eth_dev *dev;
1072
1073         /* This function is only safe when called from the primary process
1074          * in a multi-process setup*/
1075         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
1076
1077         if (!rte_eth_dev_is_valid_port(port_id)) {
1078                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1079                 return -EINVAL;
1080         }
1081
1082         dev = &rte_eth_devices[port_id];
1083
1084         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1085         return (*dev->dev_ops->dev_set_link_down)(dev);
1086 }
1087
1088 void
1089 rte_eth_dev_close(uint8_t port_id)
1090 {
1091         struct rte_eth_dev *dev;
1092
1093         /* This function is only safe when called from the primary process
1094          * in a multi-process setup*/
1095         PROC_PRIMARY_OR_RET();
1096
1097         if (!rte_eth_dev_is_valid_port(port_id)) {
1098                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1099                 return;
1100         }
1101
1102         dev = &rte_eth_devices[port_id];
1103
1104         FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
1105         dev->data->dev_started = 0;
1106         (*dev->dev_ops->dev_close)(dev);
1107 }
1108
/*
 * Allocate and configure an RX queue on a stopped port, drawing its
 * buffers from mempool mp.  The mempool element size is validated
 * against the device's minimum RX buffer size; with a NULL rx_conf
 * the driver-provided default RX configuration is used.
 * Returns 0 on success or a negative errno-style value.
 */
int
rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
		       uint16_t nb_rx_desc, unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf,
		       struct rte_mempool *mp)
{
	int ret;
	uint32_t mbp_buf_size;
	struct rte_eth_dev *dev;
	struct rte_pktmbuf_pool_private *mbp_priv;
	struct rte_eth_dev_info dev_info;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup*/
	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

	if (!rte_eth_dev_is_valid_port(port_id)) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-EINVAL);
	}

	dev = &rte_eth_devices[port_id];
	/* the queue index must be within the count set by configure() */
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
		return (-EINVAL);
	}

	if (dev->data->dev_started) {
		PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n", port_id);
		return -EBUSY;
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);

	/*
	 * Check the size of the mbuf data buffer.
	 * This value must be provided in the private data of the memory pool.
	 * First check that the memory pool has a valid private data.
	 */
	rte_eth_dev_info_get(port_id, &dev_info);
	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
		PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
				mp->name, (int) mp->private_data_size,
				(int) sizeof(struct rte_pktmbuf_pool_private));
		return (-ENOSPC);
	}
	mbp_priv = rte_mempool_get_priv(mp);
	mbp_buf_size = mbp_priv->mbuf_data_room_size;

	/* the usable data room (after the headroom) must cover the
	 * device's minimum RX buffer size */
	if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
		PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
				"(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
				"=%d)\n",
				mp->name,
				(int)mbp_buf_size,
				(int)(RTE_PKTMBUF_HEADROOM +
				      dev_info.min_rx_bufsize),
				(int)RTE_PKTMBUF_HEADROOM,
				(int)dev_info.min_rx_bufsize);
		return (-EINVAL);
	}

	/* fall back to the driver defaults when no config was given */
	if (rx_conf == NULL)
		rx_conf = &dev_info.default_rxconf;

	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
					      socket_id, rx_conf, mp);
	if (!ret) {
		/* track the smallest RX buffer size across all queues;
		 * NOTE(review): presumably consumed by scattered-RX
		 * decisions elsewhere — confirm against users of
		 * min_rx_buf_size */
		if (!dev->data->min_rx_buf_size ||
		    dev->data->min_rx_buf_size > mbp_buf_size)
			dev->data->min_rx_buf_size = mbp_buf_size;
	}

	return ret;
}
1186
1187 int
1188 rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
1189                        uint16_t nb_tx_desc, unsigned int socket_id,
1190                        const struct rte_eth_txconf *tx_conf)
1191 {
1192         struct rte_eth_dev *dev;
1193         struct rte_eth_dev_info dev_info;
1194
1195         /* This function is only safe when called from the primary process
1196          * in a multi-process setup*/
1197         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
1198
1199         if (!rte_eth_dev_is_valid_port(port_id)) {
1200                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1201                 return (-EINVAL);
1202         }
1203
1204         dev = &rte_eth_devices[port_id];
1205         if (tx_queue_id >= dev->data->nb_tx_queues) {
1206                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
1207                 return (-EINVAL);
1208         }
1209
1210         if (dev->data->dev_started) {
1211                 PMD_DEBUG_TRACE(
1212                     "port %d must be stopped to allow configuration\n", port_id);
1213                 return -EBUSY;
1214         }
1215
1216         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1217         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
1218
1219         rte_eth_dev_info_get(port_id, &dev_info);
1220
1221         if (tx_conf == NULL)
1222                 tx_conf = &dev_info.default_txconf;
1223
1224         return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
1225                                                socket_id, tx_conf);
1226 }
1227
1228 void
1229 rte_eth_promiscuous_enable(uint8_t port_id)
1230 {
1231         struct rte_eth_dev *dev;
1232
1233         if (!rte_eth_dev_is_valid_port(port_id)) {
1234                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1235                 return;
1236         }
1237
1238         dev = &rte_eth_devices[port_id];
1239
1240         FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
1241         (*dev->dev_ops->promiscuous_enable)(dev);
1242         dev->data->promiscuous = 1;
1243 }
1244
1245 void
1246 rte_eth_promiscuous_disable(uint8_t port_id)
1247 {
1248         struct rte_eth_dev *dev;
1249
1250         if (!rte_eth_dev_is_valid_port(port_id)) {
1251                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1252                 return;
1253         }
1254
1255         dev = &rte_eth_devices[port_id];
1256
1257         FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
1258         dev->data->promiscuous = 0;
1259         (*dev->dev_ops->promiscuous_disable)(dev);
1260 }
1261
1262 int
1263 rte_eth_promiscuous_get(uint8_t port_id)
1264 {
1265         struct rte_eth_dev *dev;
1266
1267         if (!rte_eth_dev_is_valid_port(port_id)) {
1268                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1269                 return -1;
1270         }
1271
1272         dev = &rte_eth_devices[port_id];
1273         return dev->data->promiscuous;
1274 }
1275
1276 void
1277 rte_eth_allmulticast_enable(uint8_t port_id)
1278 {
1279         struct rte_eth_dev *dev;
1280
1281         if (!rte_eth_dev_is_valid_port(port_id)) {
1282                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1283                 return;
1284         }
1285
1286         dev = &rte_eth_devices[port_id];
1287
1288         FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
1289         (*dev->dev_ops->allmulticast_enable)(dev);
1290         dev->data->all_multicast = 1;
1291 }
1292
1293 void
1294 rte_eth_allmulticast_disable(uint8_t port_id)
1295 {
1296         struct rte_eth_dev *dev;
1297
1298         if (!rte_eth_dev_is_valid_port(port_id)) {
1299                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1300                 return;
1301         }
1302
1303         dev = &rte_eth_devices[port_id];
1304
1305         FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
1306         dev->data->all_multicast = 0;
1307         (*dev->dev_ops->allmulticast_disable)(dev);
1308 }
1309
1310 int
1311 rte_eth_allmulticast_get(uint8_t port_id)
1312 {
1313         struct rte_eth_dev *dev;
1314
1315         if (!rte_eth_dev_is_valid_port(port_id)) {
1316                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1317                 return -1;
1318         }
1319
1320         dev = &rte_eth_devices[port_id];
1321         return dev->data->all_multicast;
1322 }
1323
/*
 * Atomically copy the cached device link status into *link.
 *
 * The 64-bit compare-and-set overwrites *link (whose old value is the
 * expected value) with dev->data->dev_link; it can only fail, returning
 * -1, if *link was changed concurrently between the read of its old
 * value and the swap.
 * NOTE(review): assumes struct rte_eth_link fits in 64 bits and is
 * suitably aligned for rte_atomic64_cmpset — confirm in rte_ethdev.h.
 */
static inline int
rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	/* cmpset(dst, expected=*dst, new=*src): publish the link word */
	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
1337
/*
 * Retrieve the link status of a port, waiting for the driver to
 * complete link negotiation.  When link state interrupts are enabled,
 * the cached dev_link maintained by the interrupt handler is read
 * atomically instead of querying the driver.
 */
void
rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
{
	struct rte_eth_dev *dev;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return;
	}

	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.intr_conf.lsc != 0)
		rte_eth_dev_atomic_read_link_status(dev, eth_link);
	else {
		/* wait_to_complete = 1: block until the link settles */
		FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
		(*dev->dev_ops->link_update)(dev, 1);
		*eth_link = dev->data->dev_link;
	}
}
1358
/*
 * Retrieve the link status of a port without waiting for link
 * negotiation to complete.  When link state interrupts are enabled,
 * the cached dev_link maintained by the interrupt handler is read
 * atomically instead of querying the driver.
 */
void
rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
{
	struct rte_eth_dev *dev;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return;
	}

	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.intr_conf.lsc != 0)
		rte_eth_dev_atomic_read_link_status(dev, eth_link);
	else {
		/* wait_to_complete = 0: return the current state at once */
		FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
		(*dev->dev_ops->link_update)(dev, 0);
		*eth_link = dev->data->dev_link;
	}
}
1379
/*
 * Fill *stats with the basic statistics of a port.  The structure is
 * zeroed first so fields the driver does not set read as zero.
 */
int
rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
{
	struct rte_eth_dev *eth_dev;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-ENODEV);
	}

	eth_dev = &rte_eth_devices[port_id];

	memset(stats, 0, sizeof(*stats));

	FUNC_PTR_OR_ERR_RET(*eth_dev->dev_ops->stats_get, -ENOTSUP);
	(*eth_dev->dev_ops->stats_get)(eth_dev, stats);

	/* the mbuf allocation failure counter is kept by the ethdev
	 * layer, not by the driver */
	stats->rx_nombuf = eth_dev->data->rx_mbuf_alloc_failed;
	return 0;
}
1398
1399 void
1400 rte_eth_stats_reset(uint8_t port_id)
1401 {
1402         struct rte_eth_dev *dev;
1403
1404         if (!rte_eth_dev_is_valid_port(port_id)) {
1405                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1406                 return;
1407         }
1408
1409         dev = &rte_eth_devices[port_id];
1410
1411         FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
1412         (*dev->dev_ops->stats_reset)(dev);
1413 }
1414
/* retrieve ethdev extended statistics */
/*
 * Fill xstats[] with up to n extended statistics entries.  If the
 * driver implements xstats_get, delegate to it entirely; otherwise
 * synthesize entries from the generic rte_eth_stats fields plus the
 * per-queue counters.  Returns the number of entries written, the
 * required count when n is too small, or -1 for an invalid port.
 */
int
rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstats *xstats,
	unsigned n)
{
	struct rte_eth_stats eth_stats;
	struct rte_eth_dev *dev;
	unsigned count, i, q;
	uint64_t val;
	char *stats_ptr;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return -1;
	}

	dev = &rte_eth_devices[port_id];

	/* implemented by the driver */
	if (dev->dev_ops->xstats_get != NULL)
		return (*dev->dev_ops->xstats_get)(dev, xstats, n);

	/* else, return generic statistics */
	count = RTE_NB_STATS;
	count += dev->data->nb_rx_queues * RTE_NB_RXQ_STATS;
	count += dev->data->nb_tx_queues * RTE_NB_TXQ_STATS;
	/* caller's array is too small: report how many entries it needs */
	if (n < count)
		return count;

	/* now fill the xstats structure */

	count = 0;
	memset(&eth_stats, 0, sizeof(eth_stats));
	rte_eth_stats_get(port_id, &eth_stats);

	/* global stats: each value is read from eth_stats at the byte
	 * offset recorded in the rte_stats_strings table */
	for (i = 0; i < RTE_NB_STATS; i++) {
		stats_ptr = (char *)&eth_stats + rte_stats_strings[i].offset;
		val = *(uint64_t *)stats_ptr;
		snprintf(xstats[count].name, sizeof(xstats[count].name),
			"%s", rte_stats_strings[i].name);
		xstats[count++].value = val;
	}

	/* per-rxq stats: the per-queue counters are uint64_t arrays, so
	 * queue q's value lies q * sizeof(uint64_t) past the base offset */
	for (q = 0; q < dev->data->nb_rx_queues; q++) {
		for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
			stats_ptr = (char *)&eth_stats;
			stats_ptr += rte_rxq_stats_strings[i].offset;
			stats_ptr += q * sizeof(uint64_t);
			val = *(uint64_t *)stats_ptr;
			snprintf(xstats[count].name, sizeof(xstats[count].name),
				"rx_queue_%u_%s", q,
				rte_rxq_stats_strings[i].name);
			xstats[count++].value = val;
		}
	}

	/* per-txq stats, laid out the same way as the RX queue stats */
	for (q = 0; q < dev->data->nb_tx_queues; q++) {
		for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
			stats_ptr = (char *)&eth_stats;
			stats_ptr += rte_txq_stats_strings[i].offset;
			stats_ptr += q * sizeof(uint64_t);
			val = *(uint64_t *)stats_ptr;
			snprintf(xstats[count].name, sizeof(xstats[count].name),
				"tx_queue_%u_%s", q,
				rte_txq_stats_strings[i].name);
			xstats[count++].value = val;
		}
	}

	return count;
}
1489
1490 /* reset ethdev extended statistics */
1491 void
1492 rte_eth_xstats_reset(uint8_t port_id)
1493 {
1494         struct rte_eth_dev *dev;
1495
1496         if (!rte_eth_dev_is_valid_port(port_id)) {
1497                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1498                 return;
1499         }
1500
1501         dev = &rte_eth_devices[port_id];
1502
1503         /* implemented by the driver */
1504         if (dev->dev_ops->xstats_reset != NULL) {
1505                 (*dev->dev_ops->xstats_reset)(dev);
1506                 return;
1507         }
1508
1509         /* fallback to default */
1510         rte_eth_stats_reset(port_id);
1511 }
1512
1513 static int
1514 set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
1515                 uint8_t is_rx)
1516 {
1517         struct rte_eth_dev *dev;
1518
1519         if (!rte_eth_dev_is_valid_port(port_id)) {
1520                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1521                 return -ENODEV;
1522         }
1523
1524         dev = &rte_eth_devices[port_id];
1525
1526         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
1527         return (*dev->dev_ops->queue_stats_mapping_set)
1528                         (dev, queue_id, stat_idx, is_rx);
1529 }
1530
1531
1532 int
1533 rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
1534                 uint8_t stat_idx)
1535 {
1536         return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
1537                         STAT_QMAP_TX);
1538 }
1539
1540
1541 int
1542 rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
1543                 uint8_t stat_idx)
1544 {
1545         return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
1546                         STAT_QMAP_RX);
1547 }
1548
1549
/*
 * Fill *dev_info with the device capabilities reported by the driver.
 * The structure is zeroed first; the ethdev layer then adds the PCI
 * device handle and driver name on top of the driver-provided fields.
 */
void
rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
{
	struct rte_eth_dev *dev;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return;
	}

	dev = &rte_eth_devices[port_id];

	memset(dev_info, 0, sizeof(struct rte_eth_dev_info));

	FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);
	dev_info->pci_dev = dev->pci_dev;
	/* virtual devices may have no driver attached */
	if (dev->driver)
		dev_info->driver_name = dev->driver->pci_drv.name;
}
1570
1571 void
1572 rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
1573 {
1574         struct rte_eth_dev *dev;
1575
1576         if (!rte_eth_dev_is_valid_port(port_id)) {
1577                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1578                 return;
1579         }
1580
1581         dev = &rte_eth_devices[port_id];
1582         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
1583 }
1584
1585
1586 int
1587 rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu)
1588 {
1589         struct rte_eth_dev *dev;
1590
1591         if (!rte_eth_dev_is_valid_port(port_id)) {
1592                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1593                 return (-ENODEV);
1594         }
1595
1596         dev = &rte_eth_devices[port_id];
1597         *mtu = dev->data->mtu;
1598         return 0;
1599 }
1600
1601 int
1602 rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu)
1603 {
1604         int ret;
1605         struct rte_eth_dev *dev;
1606
1607         if (!rte_eth_dev_is_valid_port(port_id)) {
1608                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1609                 return (-ENODEV);
1610         }
1611
1612         dev = &rte_eth_devices[port_id];
1613         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
1614
1615         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
1616         if (!ret)
1617                 dev->data->mtu = mtu;
1618
1619         return ret;
1620 }
1621
/*
 * Enable (on != 0) or disable (on == 0) filtering of the given VLAN id.
 * Requires hw_vlan_filter to be set in the port RX mode and a vlan_id
 * in the valid 12-bit range (0-4095).
 */
int
rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
{
	struct rte_eth_dev *dev;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-ENODEV);
	}

	dev = &rte_eth_devices[port_id];
	/* filtering only makes sense when HW VLAN filtering is on */
	if (! (dev->data->dev_conf.rxmode.hw_vlan_filter)) {
		PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
		return (-ENOSYS);
	}

	if (vlan_id > 4095) {
		PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
				port_id, (unsigned) vlan_id);
		return (-EINVAL);
	}
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
	(*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
	return (0);
}
1647
1648 int
1649 rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on)
1650 {
1651         struct rte_eth_dev *dev;
1652
1653         if (!rte_eth_dev_is_valid_port(port_id)) {
1654                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1655                 return (-ENODEV);
1656         }
1657
1658         dev = &rte_eth_devices[port_id];
1659         if (rx_queue_id >= dev->data->nb_rx_queues) {
1660                 PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", port_id);
1661                 return (-EINVAL);
1662         }
1663
1664         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
1665         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
1666
1667         return (0);
1668 }
1669
1670 int
1671 rte_eth_dev_set_vlan_ether_type(uint8_t port_id, uint16_t tpid)
1672 {
1673         struct rte_eth_dev *dev;
1674
1675         if (!rte_eth_dev_is_valid_port(port_id)) {
1676                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1677                 return (-ENODEV);
1678         }
1679
1680         dev = &rte_eth_devices[port_id];
1681         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
1682         (*dev->dev_ops->vlan_tpid_set)(dev, tpid);
1683
1684         return (0);
1685 }
1686
1687 int
1688 rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
1689 {
1690         struct rte_eth_dev *dev;
1691         int ret = 0;
1692         int mask = 0;
1693         int cur, org = 0;
1694
1695         if (!rte_eth_dev_is_valid_port(port_id)) {
1696                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1697                 return (-ENODEV);
1698         }
1699
1700         dev = &rte_eth_devices[port_id];
1701
1702         /*check which option changed by application*/
1703         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
1704         org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
1705         if (cur != org){
1706                 dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
1707                 mask |= ETH_VLAN_STRIP_MASK;
1708         }
1709
1710         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
1711         org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
1712         if (cur != org){
1713                 dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
1714                 mask |= ETH_VLAN_FILTER_MASK;
1715         }
1716
1717         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
1718         org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
1719         if (cur != org){
1720                 dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
1721                 mask |= ETH_VLAN_EXTEND_MASK;
1722         }
1723
1724         /*no change*/
1725         if(mask == 0)
1726                 return ret;
1727
1728         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
1729         (*dev->dev_ops->vlan_offload_set)(dev, mask);
1730
1731         return ret;
1732 }
1733
1734 int
1735 rte_eth_dev_get_vlan_offload(uint8_t port_id)
1736 {
1737         struct rte_eth_dev *dev;
1738         int ret = 0;
1739
1740         if (!rte_eth_dev_is_valid_port(port_id)) {
1741                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1742                 return (-ENODEV);
1743         }
1744
1745         dev = &rte_eth_devices[port_id];
1746
1747         if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1748                 ret |= ETH_VLAN_STRIP_OFFLOAD ;
1749
1750         if (dev->data->dev_conf.rxmode.hw_vlan_filter)
1751                 ret |= ETH_VLAN_FILTER_OFFLOAD ;
1752
1753         if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1754                 ret |= ETH_VLAN_EXTEND_OFFLOAD ;
1755
1756         return ret;
1757 }
1758
1759 int
1760 rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on)
1761 {
1762         struct rte_eth_dev *dev;
1763
1764         if (!rte_eth_dev_is_valid_port(port_id)) {
1765                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1766                 return (-ENODEV);
1767         }
1768
1769         dev = &rte_eth_devices[port_id];
1770         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
1771         (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on);
1772
1773         return 0;
1774 }
1775
1776 int
1777 rte_eth_dev_fdir_add_signature_filter(uint8_t port_id,
1778                                       struct rte_fdir_filter *fdir_filter,
1779                                       uint8_t queue)
1780 {
1781         struct rte_eth_dev *dev;
1782
1783         if (!rte_eth_dev_is_valid_port(port_id)) {
1784                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1785                 return (-ENODEV);
1786         }
1787
1788         dev = &rte_eth_devices[port_id];
1789
1790         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
1791                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1792                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1793                 return (-ENOSYS);
1794         }
1795
1796         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1797              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1798             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1799                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1800                                 "None l4type, source & destinations ports " \
1801                                 "should be null!\n");
1802                 return (-EINVAL);
1803         }
1804
1805         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_signature_filter, -ENOTSUP);
1806         return (*dev->dev_ops->fdir_add_signature_filter)(dev, fdir_filter,
1807                                                                 queue);
1808 }
1809
1810 int
1811 rte_eth_dev_fdir_update_signature_filter(uint8_t port_id,
1812                                          struct rte_fdir_filter *fdir_filter,
1813                                          uint8_t queue)
1814 {
1815         struct rte_eth_dev *dev;
1816
1817         if (!rte_eth_dev_is_valid_port(port_id)) {
1818                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1819                 return (-ENODEV);
1820         }
1821
1822         dev = &rte_eth_devices[port_id];
1823
1824         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
1825                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1826                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1827                 return (-ENOSYS);
1828         }
1829
1830         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1831              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1832             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1833                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1834                                 "None l4type, source & destinations ports " \
1835                                 "should be null!\n");
1836                 return (-EINVAL);
1837         }
1838
1839         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_signature_filter, -ENOTSUP);
1840         return (*dev->dev_ops->fdir_update_signature_filter)(dev, fdir_filter,
1841                                                                 queue);
1842
1843 }
1844
1845 int
1846 rte_eth_dev_fdir_remove_signature_filter(uint8_t port_id,
1847                                          struct rte_fdir_filter *fdir_filter)
1848 {
1849         struct rte_eth_dev *dev;
1850
1851         if (!rte_eth_dev_is_valid_port(port_id)) {
1852                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1853                 return (-ENODEV);
1854         }
1855
1856         dev = &rte_eth_devices[port_id];
1857
1858         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
1859                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1860                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1861                 return (-ENOSYS);
1862         }
1863
1864         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1865              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1866             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1867                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1868                                 "None l4type source & destinations ports " \
1869                                 "should be null!\n");
1870                 return (-EINVAL);
1871         }
1872
1873         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_signature_filter, -ENOTSUP);
1874         return (*dev->dev_ops->fdir_remove_signature_filter)(dev, fdir_filter);
1875 }
1876
1877 int
1878 rte_eth_dev_fdir_get_infos(uint8_t port_id, struct rte_eth_fdir *fdir)
1879 {
1880         struct rte_eth_dev *dev;
1881
1882         if (!rte_eth_dev_is_valid_port(port_id)) {
1883                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1884                 return (-ENODEV);
1885         }
1886
1887         dev = &rte_eth_devices[port_id];
1888         if (! (dev->data->dev_conf.fdir_conf.mode)) {
1889                 PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
1890                 return (-ENOSYS);
1891         }
1892
1893         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_infos_get, -ENOTSUP);
1894
1895         (*dev->dev_ops->fdir_infos_get)(dev, fdir);
1896         return (0);
1897 }
1898
1899 int
1900 rte_eth_dev_fdir_add_perfect_filter(uint8_t port_id,
1901                                     struct rte_fdir_filter *fdir_filter,
1902                                     uint16_t soft_id, uint8_t queue,
1903                                     uint8_t drop)
1904 {
1905         struct rte_eth_dev *dev;
1906
1907         if (!rte_eth_dev_is_valid_port(port_id)) {
1908                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1909                 return (-ENODEV);
1910         }
1911
1912         dev = &rte_eth_devices[port_id];
1913
1914         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
1915                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1916                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1917                 return (-ENOSYS);
1918         }
1919
1920         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1921              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1922             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1923                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1924                                 "None l4type, source & destinations ports " \
1925                                 "should be null!\n");
1926                 return (-EINVAL);
1927         }
1928
1929         /* For now IPv6 is not supported with perfect filter */
1930         if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
1931                 return (-ENOTSUP);
1932
1933         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_perfect_filter, -ENOTSUP);
1934         return (*dev->dev_ops->fdir_add_perfect_filter)(dev, fdir_filter,
1935                                                                 soft_id, queue,
1936                                                                 drop);
1937 }
1938
1939 int
1940 rte_eth_dev_fdir_update_perfect_filter(uint8_t port_id,
1941                                        struct rte_fdir_filter *fdir_filter,
1942                                        uint16_t soft_id, uint8_t queue,
1943                                        uint8_t drop)
1944 {
1945         struct rte_eth_dev *dev;
1946
1947         if (!rte_eth_dev_is_valid_port(port_id)) {
1948                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1949                 return (-ENODEV);
1950         }
1951
1952         dev = &rte_eth_devices[port_id];
1953
1954         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
1955                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1956                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1957                 return (-ENOSYS);
1958         }
1959
1960         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1961              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1962             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1963                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1964                                 "None l4type, source & destinations ports " \
1965                                 "should be null!\n");
1966                 return (-EINVAL);
1967         }
1968
1969         /* For now IPv6 is not supported with perfect filter */
1970         if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
1971                 return (-ENOTSUP);
1972
1973         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_perfect_filter, -ENOTSUP);
1974         return (*dev->dev_ops->fdir_update_perfect_filter)(dev, fdir_filter,
1975                                                         soft_id, queue, drop);
1976 }
1977
1978 int
1979 rte_eth_dev_fdir_remove_perfect_filter(uint8_t port_id,
1980                                        struct rte_fdir_filter *fdir_filter,
1981                                        uint16_t soft_id)
1982 {
1983         struct rte_eth_dev *dev;
1984
1985         if (!rte_eth_dev_is_valid_port(port_id)) {
1986                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1987                 return (-ENODEV);
1988         }
1989
1990         dev = &rte_eth_devices[port_id];
1991
1992         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
1993                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1994                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1995                 return (-ENOSYS);
1996         }
1997
1998         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1999              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
2000             && (fdir_filter->port_src || fdir_filter->port_dst)) {
2001                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
2002                                 "None l4type, source & destinations ports " \
2003                                 "should be null!\n");
2004                 return (-EINVAL);
2005         }
2006
2007         /* For now IPv6 is not supported with perfect filter */
2008         if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
2009                 return (-ENOTSUP);
2010
2011         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_perfect_filter, -ENOTSUP);
2012         return (*dev->dev_ops->fdir_remove_perfect_filter)(dev, fdir_filter,
2013                                                                 soft_id);
2014 }
2015
2016 int
2017 rte_eth_dev_fdir_set_masks(uint8_t port_id, struct rte_fdir_masks *fdir_mask)
2018 {
2019         struct rte_eth_dev *dev;
2020
2021         if (!rte_eth_dev_is_valid_port(port_id)) {
2022                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2023                 return (-ENODEV);
2024         }
2025
2026         dev = &rte_eth_devices[port_id];
2027         if (! (dev->data->dev_conf.fdir_conf.mode)) {
2028                 PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
2029                 return (-ENOSYS);
2030         }
2031
2032         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_set_masks, -ENOTSUP);
2033         return (*dev->dev_ops->fdir_set_masks)(dev, fdir_mask);
2034 }
2035
2036 int
2037 rte_eth_dev_flow_ctrl_get(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
2038 {
2039         struct rte_eth_dev *dev;
2040
2041         if (!rte_eth_dev_is_valid_port(port_id)) {
2042                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2043                 return (-ENODEV);
2044         }
2045
2046         dev = &rte_eth_devices[port_id];
2047         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
2048         memset(fc_conf, 0, sizeof(*fc_conf));
2049         return (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf);
2050 }
2051
2052 int
2053 rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
2054 {
2055         struct rte_eth_dev *dev;
2056
2057         if (!rte_eth_dev_is_valid_port(port_id)) {
2058                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2059                 return (-ENODEV);
2060         }
2061
2062         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
2063                 PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
2064                 return (-EINVAL);
2065         }
2066
2067         dev = &rte_eth_devices[port_id];
2068         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
2069         return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
2070 }
2071
2072 int
2073 rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc_conf)
2074 {
2075         struct rte_eth_dev *dev;
2076
2077         if (!rte_eth_dev_is_valid_port(port_id)) {
2078                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2079                 return (-ENODEV);
2080         }
2081
2082         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
2083                 PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
2084                 return (-EINVAL);
2085         }
2086
2087         dev = &rte_eth_devices[port_id];
2088         /* High water, low water validation are device specific */
2089         if  (*dev->dev_ops->priority_flow_ctrl_set)
2090                 return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
2091         return (-ENOTSUP);
2092 }
2093
2094 static inline int
2095 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
2096                         uint16_t reta_size)
2097 {
2098         uint16_t i, num;
2099
2100         if (!reta_conf)
2101                 return -EINVAL;
2102
2103         if (reta_size != RTE_ALIGN(reta_size, RTE_RETA_GROUP_SIZE)) {
2104                 PMD_DEBUG_TRACE("Invalid reta size, should be %u aligned\n",
2105                                                         RTE_RETA_GROUP_SIZE);
2106                 return -EINVAL;
2107         }
2108
2109         num = reta_size / RTE_RETA_GROUP_SIZE;
2110         for (i = 0; i < num; i++) {
2111                 if (reta_conf[i].mask)
2112                         return 0;
2113         }
2114
2115         return -EINVAL;
2116 }
2117
2118 static inline int
2119 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
2120                          uint16_t reta_size,
2121                          uint8_t max_rxq)
2122 {
2123         uint16_t i, idx, shift;
2124
2125         if (!reta_conf)
2126                 return -EINVAL;
2127
2128         if (max_rxq == 0) {
2129                 PMD_DEBUG_TRACE("No receive queue is available\n");
2130                 return -EINVAL;
2131         }
2132
2133         for (i = 0; i < reta_size; i++) {
2134                 idx = i / RTE_RETA_GROUP_SIZE;
2135                 shift = i % RTE_RETA_GROUP_SIZE;
2136                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
2137                         (reta_conf[idx].reta[shift] >= max_rxq)) {
2138                         PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
2139                                 "the maximum rxq index: %u\n", idx, shift,
2140                                 reta_conf[idx].reta[shift], max_rxq);
2141                         return -EINVAL;
2142                 }
2143         }
2144
2145         return 0;
2146 }
2147
2148 int
2149 rte_eth_dev_rss_reta_update(uint8_t port_id,
2150                             struct rte_eth_rss_reta_entry64 *reta_conf,
2151                             uint16_t reta_size)
2152 {
2153         struct rte_eth_dev *dev;
2154         int ret;
2155
2156         if (!rte_eth_dev_is_valid_port(port_id)) {
2157                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2158                 return -ENODEV;
2159         }
2160
2161         /* Check mask bits */
2162         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2163         if (ret < 0)
2164                 return ret;
2165
2166         dev = &rte_eth_devices[port_id];
2167
2168         /* Check entry value */
2169         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
2170                                 dev->data->nb_rx_queues);
2171         if (ret < 0)
2172                 return ret;
2173
2174         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
2175         return (*dev->dev_ops->reta_update)(dev, reta_conf, reta_size);
2176 }
2177
2178 int
2179 rte_eth_dev_rss_reta_query(uint8_t port_id,
2180                            struct rte_eth_rss_reta_entry64 *reta_conf,
2181                            uint16_t reta_size)
2182 {
2183         struct rte_eth_dev *dev;
2184         int ret;
2185
2186         if (port_id >= nb_ports) {
2187                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2188                 return -ENODEV;
2189         }
2190
2191         /* Check mask bits */
2192         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2193         if (ret < 0)
2194                 return ret;
2195
2196         dev = &rte_eth_devices[port_id];
2197         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
2198         return (*dev->dev_ops->reta_query)(dev, reta_conf, reta_size);
2199 }
2200
2201 int
2202 rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf)
2203 {
2204         struct rte_eth_dev *dev;
2205         uint16_t rss_hash_protos;
2206
2207         if (!rte_eth_dev_is_valid_port(port_id)) {
2208                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2209                 return (-ENODEV);
2210         }
2211
2212         rss_hash_protos = rss_conf->rss_hf;
2213         if ((rss_hash_protos != 0) &&
2214             ((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) {
2215                 PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%x\n",
2216                                 rss_hash_protos);
2217                 return (-EINVAL);
2218         }
2219         dev = &rte_eth_devices[port_id];
2220         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
2221         return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
2222 }
2223
2224 int
2225 rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
2226                               struct rte_eth_rss_conf *rss_conf)
2227 {
2228         struct rte_eth_dev *dev;
2229
2230         if (!rte_eth_dev_is_valid_port(port_id)) {
2231                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2232                 return (-ENODEV);
2233         }
2234
2235         dev = &rte_eth_devices[port_id];
2236         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
2237         return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
2238 }
2239
2240 int
2241 rte_eth_dev_udp_tunnel_add(uint8_t port_id,
2242                            struct rte_eth_udp_tunnel *udp_tunnel)
2243 {
2244         struct rte_eth_dev *dev;
2245
2246         if (!rte_eth_dev_is_valid_port(port_id)) {
2247                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2248                 return -ENODEV;
2249         }
2250
2251         if (udp_tunnel == NULL) {
2252                 PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2253                 return -EINVAL;
2254         }
2255
2256         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2257                 PMD_DEBUG_TRACE("Invalid tunnel type\n");
2258                 return -EINVAL;
2259         }
2260
2261         dev = &rte_eth_devices[port_id];
2262         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_add, -ENOTSUP);
2263         return (*dev->dev_ops->udp_tunnel_add)(dev, udp_tunnel);
2264 }
2265
2266 int
2267 rte_eth_dev_udp_tunnel_delete(uint8_t port_id,
2268                               struct rte_eth_udp_tunnel *udp_tunnel)
2269 {
2270         struct rte_eth_dev *dev;
2271
2272         if (!rte_eth_dev_is_valid_port(port_id)) {
2273                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2274                 return -ENODEV;
2275         }
2276
2277         dev = &rte_eth_devices[port_id];
2278
2279         if (udp_tunnel == NULL) {
2280                 PMD_DEBUG_TRACE("Invalid udp_tunnel parametr\n");
2281                 return -EINVAL;
2282         }
2283
2284         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2285                 PMD_DEBUG_TRACE("Invalid tunnel type\n");
2286                 return -EINVAL;
2287         }
2288
2289         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_del, -ENOTSUP);
2290         return (*dev->dev_ops->udp_tunnel_del)(dev, udp_tunnel);
2291 }
2292
2293 int
2294 rte_eth_led_on(uint8_t port_id)
2295 {
2296         struct rte_eth_dev *dev;
2297
2298         if (!rte_eth_dev_is_valid_port(port_id)) {
2299                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2300                 return (-ENODEV);
2301         }
2302
2303         dev = &rte_eth_devices[port_id];
2304         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
2305         return ((*dev->dev_ops->dev_led_on)(dev));
2306 }
2307
2308 int
2309 rte_eth_led_off(uint8_t port_id)
2310 {
2311         struct rte_eth_dev *dev;
2312
2313         if (!rte_eth_dev_is_valid_port(port_id)) {
2314                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2315                 return (-ENODEV);
2316         }
2317
2318         dev = &rte_eth_devices[port_id];
2319         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
2320         return ((*dev->dev_ops->dev_led_off)(dev));
2321 }
2322
2323 /*
2324  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2325  * an empty spot.
2326  */
2327 static inline int
2328 get_mac_addr_index(uint8_t port_id, struct ether_addr *addr)
2329 {
2330         struct rte_eth_dev_info dev_info;
2331         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2332         unsigned i;
2333
2334         rte_eth_dev_info_get(port_id, &dev_info);
2335
2336         for (i = 0; i < dev_info.max_mac_addrs; i++)
2337                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
2338                         return i;
2339
2340         return -1;
2341 }
2342
/* All-zero MAC address: marks free slots in the per-port MAC tables. */
static struct ether_addr null_mac_addr = {{0, 0, 0, 0, 0, 0}};
2344
2345 int
2346 rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
2347                         uint32_t pool)
2348 {
2349         struct rte_eth_dev *dev;
2350         int index;
2351         uint64_t pool_mask;
2352
2353         if (!rte_eth_dev_is_valid_port(port_id)) {
2354                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2355                 return (-ENODEV);
2356         }
2357
2358         dev = &rte_eth_devices[port_id];
2359         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
2360
2361         if (is_zero_ether_addr(addr)) {
2362                 PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2363                         port_id);
2364                 return (-EINVAL);
2365         }
2366         if (pool >= ETH_64_POOLS) {
2367                 PMD_DEBUG_TRACE("pool id must be 0-%d\n",ETH_64_POOLS - 1);
2368                 return (-EINVAL);
2369         }
2370
2371         index = get_mac_addr_index(port_id, addr);
2372         if (index < 0) {
2373                 index = get_mac_addr_index(port_id, &null_mac_addr);
2374                 if (index < 0) {
2375                         PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2376                                 port_id);
2377                         return (-ENOSPC);
2378                 }
2379         } else {
2380                 pool_mask = dev->data->mac_pool_sel[index];
2381
2382                 /* Check if both MAC address and pool is alread there, and do nothing */
2383                 if (pool_mask & (1ULL << pool))
2384                         return 0;
2385         }
2386
2387         /* Update NIC */
2388         (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
2389
2390         /* Update address in NIC data structure */
2391         ether_addr_copy(addr, &dev->data->mac_addrs[index]);
2392
2393         /* Update pool bitmap in NIC data structure */
2394         dev->data->mac_pool_sel[index] |= (1ULL << pool);
2395
2396         return 0;
2397 }
2398
2399 int
2400 rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
2401 {
2402         struct rte_eth_dev *dev;
2403         int index;
2404
2405         if (!rte_eth_dev_is_valid_port(port_id)) {
2406                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2407                 return (-ENODEV);
2408         }
2409
2410         dev = &rte_eth_devices[port_id];
2411         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
2412
2413         index = get_mac_addr_index(port_id, addr);
2414         if (index == 0) {
2415                 PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
2416                 return (-EADDRINUSE);
2417         } else if (index < 0)
2418                 return 0;  /* Do nothing if address wasn't found */
2419
2420         /* Update NIC */
2421         (*dev->dev_ops->mac_addr_remove)(dev, index);
2422
2423         /* Update address in NIC data structure */
2424         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
2425
2426         /* reset pool bitmap */
2427         dev->data->mac_pool_sel[index] = 0;
2428
2429         return 0;
2430 }
2431
2432 int
2433 rte_eth_dev_set_vf_rxmode(uint8_t port_id,  uint16_t vf,
2434                                 uint16_t rx_mode, uint8_t on)
2435 {
2436         uint16_t num_vfs;
2437         struct rte_eth_dev *dev;
2438         struct rte_eth_dev_info dev_info;
2439
2440         if (!rte_eth_dev_is_valid_port(port_id)) {
2441                 PMD_DEBUG_TRACE("set VF RX mode:Invalid port_id=%d\n",
2442                                 port_id);
2443                 return (-ENODEV);
2444         }
2445
2446         dev = &rte_eth_devices[port_id];
2447         rte_eth_dev_info_get(port_id, &dev_info);
2448
2449         num_vfs = dev_info.max_vfs;
2450         if (vf > num_vfs)
2451         {
2452                 PMD_DEBUG_TRACE("set VF RX mode:invalid VF id %d\n", vf);
2453                 return (-EINVAL);
2454         }
2455         if (rx_mode == 0)
2456         {
2457                 PMD_DEBUG_TRACE("set VF RX mode:mode mask ca not be zero\n");
2458                 return (-EINVAL);
2459         }
2460         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP);
2461         return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on);
2462 }
2463
2464 /*
2465  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2466  * an empty spot.
2467  */
2468 static inline int
2469 get_hash_mac_addr_index(uint8_t port_id, struct ether_addr *addr)
2470 {
2471         struct rte_eth_dev_info dev_info;
2472         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2473         unsigned i;
2474
2475         rte_eth_dev_info_get(port_id, &dev_info);
2476         if (!dev->data->hash_mac_addrs)
2477                 return -1;
2478
2479         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
2480                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
2481                         ETHER_ADDR_LEN) == 0)
2482                         return i;
2483
2484         return -1;
2485 }
2486
2487 int
2488 rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
2489                                 uint8_t on)
2490 {
2491         int index;
2492         int ret;
2493         struct rte_eth_dev *dev;
2494
2495         if (!rte_eth_dev_is_valid_port(port_id)) {
2496                 PMD_DEBUG_TRACE("unicast hash setting:Invalid port_id=%d\n",
2497                         port_id);
2498                 return (-ENODEV);
2499         }
2500
2501         dev = &rte_eth_devices[port_id];
2502         if (is_zero_ether_addr(addr)) {
2503                 PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2504                         port_id);
2505                 return (-EINVAL);
2506         }
2507
2508         index = get_hash_mac_addr_index(port_id, addr);
2509         /* Check if it's already there, and do nothing */
2510         if ((index >= 0) && (on))
2511                 return 0;
2512
2513         if (index < 0) {
2514                 if (!on) {
2515                         PMD_DEBUG_TRACE("port %d: the MAC address was not"
2516                                 "set in UTA\n", port_id);
2517                         return (-EINVAL);
2518                 }
2519
2520                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
2521                 if (index < 0) {
2522                         PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2523                                         port_id);
2524                         return (-ENOSPC);
2525                 }
2526         }
2527
2528         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
2529         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
2530         if (ret == 0) {
2531                 /* Update address in NIC data structure */
2532                 if (on)
2533                         ether_addr_copy(addr,
2534                                         &dev->data->hash_mac_addrs[index]);
2535                 else
2536                         ether_addr_copy(&null_mac_addr,
2537                                         &dev->data->hash_mac_addrs[index]);
2538         }
2539
2540         return ret;
2541 }
2542
2543 int
2544 rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
2545 {
2546         struct rte_eth_dev *dev;
2547
2548         if (!rte_eth_dev_is_valid_port(port_id)) {
2549                 PMD_DEBUG_TRACE("unicast hash setting:Invalid port_id=%d\n",
2550                         port_id);
2551                 return (-ENODEV);
2552         }
2553
2554         dev = &rte_eth_devices[port_id];
2555
2556         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
2557         return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
2558 }
2559
2560 int
2561 rte_eth_dev_set_vf_rx(uint8_t port_id,uint16_t vf, uint8_t on)
2562 {
2563         uint16_t num_vfs;
2564         struct rte_eth_dev *dev;
2565         struct rte_eth_dev_info dev_info;
2566
2567         if (!rte_eth_dev_is_valid_port(port_id)) {
2568                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2569                 return (-ENODEV);
2570         }
2571
2572         dev = &rte_eth_devices[port_id];
2573         rte_eth_dev_info_get(port_id, &dev_info);
2574
2575         num_vfs = dev_info.max_vfs;
2576         if (vf > num_vfs)
2577         {
2578                 PMD_DEBUG_TRACE("port %d: invalid vf id\n", port_id);
2579                 return (-EINVAL);
2580         }
2581
2582         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP);
2583         return (*dev->dev_ops->set_vf_rx)(dev, vf,on);
2584 }
2585
2586 int
2587 rte_eth_dev_set_vf_tx(uint8_t port_id,uint16_t vf, uint8_t on)
2588 {
2589         uint16_t num_vfs;
2590         struct rte_eth_dev *dev;
2591         struct rte_eth_dev_info dev_info;
2592
2593         if (!rte_eth_dev_is_valid_port(port_id)) {
2594                 PMD_DEBUG_TRACE("set pool tx:Invalid port_id=%d\n", port_id);
2595                 return (-ENODEV);
2596         }
2597
2598         dev = &rte_eth_devices[port_id];
2599         rte_eth_dev_info_get(port_id, &dev_info);
2600
2601         num_vfs = dev_info.max_vfs;
2602         if (vf > num_vfs)
2603         {
2604                 PMD_DEBUG_TRACE("set pool tx:invalid pool id=%d\n", vf);
2605                 return (-EINVAL);
2606         }
2607
2608         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP);
2609         return (*dev->dev_ops->set_vf_tx)(dev, vf,on);
2610 }
2611
2612 int
2613 rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id,
2614                                  uint64_t vf_mask,uint8_t vlan_on)
2615 {
2616         struct rte_eth_dev *dev;
2617
2618         if (!rte_eth_dev_is_valid_port(port_id)) {
2619                 PMD_DEBUG_TRACE("VF VLAN filter:invalid port id=%d\n",
2620                                 port_id);
2621                 return (-ENODEV);
2622         }
2623         dev = &rte_eth_devices[port_id];
2624
2625         if(vlan_id > ETHER_MAX_VLAN_ID)
2626         {
2627                 PMD_DEBUG_TRACE("VF VLAN filter:invalid VLAN id=%d\n",
2628                         vlan_id);
2629                 return (-EINVAL);
2630         }
2631         if (vf_mask == 0)
2632         {
2633                 PMD_DEBUG_TRACE("VF VLAN filter:pool_mask can not be 0\n");
2634                 return (-EINVAL);
2635         }
2636
2637         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP);
2638         return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id,
2639                                                 vf_mask,vlan_on);
2640 }
2641
2642 int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
2643                                         uint16_t tx_rate)
2644 {
2645         struct rte_eth_dev *dev;
2646         struct rte_eth_dev_info dev_info;
2647         struct rte_eth_link link;
2648
2649         if (!rte_eth_dev_is_valid_port(port_id)) {
2650                 PMD_DEBUG_TRACE("set queue rate limit:invalid port id=%d\n",
2651                                 port_id);
2652                 return -ENODEV;
2653         }
2654
2655         dev = &rte_eth_devices[port_id];
2656         rte_eth_dev_info_get(port_id, &dev_info);
2657         link = dev->data->dev_link;
2658
2659         if (queue_idx > dev_info.max_tx_queues) {
2660                 PMD_DEBUG_TRACE("set queue rate limit:port %d: "
2661                                 "invalid queue id=%d\n", port_id, queue_idx);
2662                 return -EINVAL;
2663         }
2664
2665         if (tx_rate > link.link_speed) {
2666                 PMD_DEBUG_TRACE("set queue rate limit:invalid tx_rate=%d, "
2667                                 "bigger than link speed= %d\n",
2668                         tx_rate, link.link_speed);
2669                 return -EINVAL;
2670         }
2671
2672         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
2673         return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate);
2674 }
2675
2676 int rte_eth_set_vf_rate_limit(uint8_t port_id, uint16_t vf, uint16_t tx_rate,
2677                                 uint64_t q_msk)
2678 {
2679         struct rte_eth_dev *dev;
2680         struct rte_eth_dev_info dev_info;
2681         struct rte_eth_link link;
2682
2683         if (q_msk == 0)
2684                 return 0;
2685
2686         if (!rte_eth_dev_is_valid_port(port_id)) {
2687                 PMD_DEBUG_TRACE("set VF rate limit:invalid port id=%d\n",
2688                                 port_id);
2689                 return -ENODEV;
2690         }
2691
2692         dev = &rte_eth_devices[port_id];
2693         rte_eth_dev_info_get(port_id, &dev_info);
2694         link = dev->data->dev_link;
2695
2696         if (vf > dev_info.max_vfs) {
2697                 PMD_DEBUG_TRACE("set VF rate limit:port %d: "
2698                                 "invalid vf id=%d\n", port_id, vf);
2699                 return -EINVAL;
2700         }
2701
2702         if (tx_rate > link.link_speed) {
2703                 PMD_DEBUG_TRACE("set VF rate limit:invalid tx_rate=%d, "
2704                                 "bigger than link speed= %d\n",
2705                                 tx_rate, link.link_speed);
2706                 return -EINVAL;
2707         }
2708
2709         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rate_limit, -ENOTSUP);
2710         return (*dev->dev_ops->set_vf_rate_limit)(dev, vf, tx_rate, q_msk);
2711 }
2712
2713 int
2714 rte_eth_mirror_rule_set(uint8_t port_id,
2715                         struct rte_eth_vmdq_mirror_conf *mirror_conf,
2716                         uint8_t rule_id, uint8_t on)
2717 {
2718         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2719
2720         if (!rte_eth_dev_is_valid_port(port_id)) {
2721                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2722                 return (-ENODEV);
2723         }
2724
2725         if (mirror_conf->rule_type_mask == 0) {
2726                 PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
2727                 return (-EINVAL);
2728         }
2729
2730         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
2731                 PMD_DEBUG_TRACE("Invalid dst pool, pool id must"
2732                         "be 0-%d\n",ETH_64_POOLS - 1);
2733                 return (-EINVAL);
2734         }
2735
2736         if ((mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) &&
2737                 (mirror_conf->pool_mask == 0)) {
2738                 PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not"
2739                                 "be 0.\n");
2740                 return (-EINVAL);
2741         }
2742
2743         if(rule_id >= ETH_VMDQ_NUM_MIRROR_RULE)
2744         {
2745                 PMD_DEBUG_TRACE("Invalid rule_id, rule_id must be 0-%d\n",
2746                         ETH_VMDQ_NUM_MIRROR_RULE - 1);
2747                 return (-EINVAL);
2748         }
2749
2750         dev = &rte_eth_devices[port_id];
2751         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
2752
2753         return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
2754 }
2755
2756 int
2757 rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
2758 {
2759         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2760
2761         if (!rte_eth_dev_is_valid_port(port_id)) {
2762                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2763                 return (-ENODEV);
2764         }
2765
2766         if(rule_id >= ETH_VMDQ_NUM_MIRROR_RULE)
2767         {
2768                 PMD_DEBUG_TRACE("Invalid rule_id, rule_id must be 0-%d\n",
2769                         ETH_VMDQ_NUM_MIRROR_RULE-1);
2770                 return (-EINVAL);
2771         }
2772
2773         dev = &rte_eth_devices[port_id];
2774         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
2775
2776         return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
2777 }
2778
2779 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2780 uint16_t
2781 rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
2782                  struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
2783 {
2784         struct rte_eth_dev *dev;
2785
2786         if (!rte_eth_dev_is_valid_port(port_id)) {
2787                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2788                 return 0;
2789         }
2790
2791         dev = &rte_eth_devices[port_id];
2792         FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);
2793         if (queue_id >= dev->data->nb_rx_queues) {
2794                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
2795                 return 0;
2796         }
2797         return (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
2798                                                 rx_pkts, nb_pkts);
2799 }
2800
2801 uint16_t
2802 rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
2803                  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2804 {
2805         struct rte_eth_dev *dev;
2806
2807         if (!rte_eth_dev_is_valid_port(port_id)) {
2808                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2809                 return 0;
2810         }
2811
2812         dev = &rte_eth_devices[port_id];
2813
2814         FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);
2815         if (queue_id >= dev->data->nb_tx_queues) {
2816                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
2817                 return 0;
2818         }
2819         return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id],
2820                                                 tx_pkts, nb_pkts);
2821 }
2822
2823 uint32_t
2824 rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
2825 {
2826         struct rte_eth_dev *dev;
2827
2828         if (!rte_eth_dev_is_valid_port(port_id)) {
2829                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2830                 return 0;
2831         }
2832
2833         dev = &rte_eth_devices[port_id];
2834         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, 0);
2835         return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
2836 }
2837
2838 int
2839 rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
2840 {
2841         struct rte_eth_dev *dev;
2842
2843         if (!rte_eth_dev_is_valid_port(port_id)) {
2844                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2845                 return (-ENODEV);
2846         }
2847
2848         dev = &rte_eth_devices[port_id];
2849         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
2850         return (*dev->dev_ops->rx_descriptor_done)( \
2851                 dev->data->rx_queues[queue_id], offset);
2852 }
2853 #endif
2854
/*
 * Register @cb_fn to be invoked (with @cb_arg) when @event occurs on
 * @port_id.  Registering the exact same (fn, arg, event) triple twice is
 * a no-op.  Returns 0 on success, -EINVAL on bad arguments, -ENOMEM when
 * the callback record cannot be allocated.
 */
int
rte_eth_dev_callback_register(uint8_t port_id,
			enum rte_eth_event_type event,
			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_callback *user_cb;

	if (!cb_fn)
		return (-EINVAL);

	if (!rte_eth_dev_is_valid_port(port_id)) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-EINVAL);
	}

	dev = &rte_eth_devices[port_id];
	/* The callback list is shared with the interrupt thread; all list
	 * walks/updates happen under this lock. */
	rte_spinlock_lock(&rte_eth_dev_cb_lock);

	/* Look for an identical registration; if found, user_cb is
	 * non-NULL after the loop and nothing more is done. */
	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
		if (user_cb->cb_fn == cb_fn &&
			user_cb->cb_arg == cb_arg &&
			user_cb->event == event) {
			break;
		}
	}

	/* create a new callback. */
	if (user_cb == NULL && (user_cb = rte_zmalloc("INTR_USER_CALLBACK",
			sizeof(struct rte_eth_dev_callback), 0)) != NULL) {
		user_cb->cb_fn = cb_fn;
		user_cb->cb_arg = cb_arg;
		user_cb->event = event;
		TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
	}

	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
	/* user_cb is NULL here only when allocation failed. */
	return ((user_cb == NULL) ? -ENOMEM : 0);
}
2894
/*
 * Remove every callback on @port_id matching @event, @cb_fn and @cb_arg.
 * A @cb_arg of (void *)-1 is a wildcard matching any registered argument.
 * Returns 0 on success, -EINVAL on bad arguments, -EAGAIN when at least
 * one matching callback is currently executing and was therefore left in
 * place (the caller may retry).
 */
int
rte_eth_dev_callback_unregister(uint8_t port_id,
			enum rte_eth_event_type event,
			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_callback *cb, *next;

	if (!cb_fn)
		return (-EINVAL);

	if (!rte_eth_dev_is_valid_port(port_id)) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-EINVAL);
	}

	dev = &rte_eth_devices[port_id];
	rte_spinlock_lock(&rte_eth_dev_cb_lock);

	ret = 0;
	/* Manual walk with a saved 'next' pointer so the current entry can
	 * be removed and freed safely mid-iteration. */
	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb->cb_arg != (void *)-1 &&
				cb->cb_arg != cb_arg))
			continue;

		/*
		 * if this callback is not executing right now,
		 * then remove it.
		 */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
			rte_free(cb);
		} else {
			/* In flight (see _rte_eth_dev_callback_process);
			 * report to the caller instead of freeing it. */
			ret = -EAGAIN;
		}
	}

	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
	return (ret);
}
2940
/*
 * Invoke every callback registered on @dev for @event.  Internal helper
 * called from PMD interrupt handlers.
 *
 * The lock is dropped around each user callback so the callback itself
 * may call the register/unregister API; the entry's 'active' flag keeps
 * rte_eth_dev_callback_unregister() from freeing it while it runs (the
 * invocation uses a local copy, dev_cb, so the fields stay valid even if
 * the list entry changes).
 */
void
_rte_eth_dev_callback_process(struct rte_eth_dev *dev,
	enum rte_eth_event_type event)
{
	struct rte_eth_dev_callback *cb_lst;
	struct rte_eth_dev_callback dev_cb;

	rte_spinlock_lock(&rte_eth_dev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		/* Snapshot the entry, mark it in-flight, then call the
		 * user function without holding the lock. */
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		rte_spinlock_unlock(&rte_eth_dev_cb_lock);
		dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
						dev_cb.cb_arg);
		rte_spinlock_lock(&rte_eth_dev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
}
2962 #ifdef RTE_NIC_BYPASS
2963 int rte_eth_dev_bypass_init(uint8_t port_id)
2964 {
2965         struct rte_eth_dev *dev;
2966
2967         if (!rte_eth_dev_is_valid_port(port_id)) {
2968                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2969                 return (-ENODEV);
2970         }
2971
2972         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2973                 PMD_DEBUG_TRACE("Invalid port device\n");
2974                 return (-ENODEV);
2975         }
2976
2977         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP);
2978         (*dev->dev_ops->bypass_init)(dev);
2979         return 0;
2980 }
2981
2982 int
2983 rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state)
2984 {
2985         struct rte_eth_dev *dev;
2986
2987         if (!rte_eth_dev_is_valid_port(port_id)) {
2988                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2989                 return (-ENODEV);
2990         }
2991
2992         if ((dev= &rte_eth_devices[port_id]) == NULL) {
2993                 PMD_DEBUG_TRACE("Invalid port device\n");
2994                 return (-ENODEV);
2995         }
2996         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
2997         (*dev->dev_ops->bypass_state_show)(dev, state);
2998         return 0;
2999 }
3000
3001 int
3002 rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state)
3003 {
3004         struct rte_eth_dev *dev;
3005
3006         if (!rte_eth_dev_is_valid_port(port_id)) {
3007                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3008                 return (-ENODEV);
3009         }
3010
3011         if ((dev= &rte_eth_devices[port_id]) == NULL) {
3012                 PMD_DEBUG_TRACE("Invalid port device\n");
3013                 return (-ENODEV);
3014         }
3015
3016         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP);
3017         (*dev->dev_ops->bypass_state_set)(dev, new_state);
3018         return 0;
3019 }
3020
3021 int
3022 rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state)
3023 {
3024         struct rte_eth_dev *dev;
3025
3026         if (!rte_eth_dev_is_valid_port(port_id)) {
3027                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3028                 return (-ENODEV);
3029         }
3030
3031         if ((dev= &rte_eth_devices[port_id]) == NULL) {
3032                 PMD_DEBUG_TRACE("Invalid port device\n");
3033                 return (-ENODEV);
3034         }
3035
3036         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
3037         (*dev->dev_ops->bypass_event_show)(dev, event, state);
3038         return 0;
3039 }
3040
3041 int
3042 rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state)
3043 {
3044         struct rte_eth_dev *dev;
3045
3046         if (!rte_eth_dev_is_valid_port(port_id)) {
3047                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3048                 return (-ENODEV);
3049         }
3050
3051         if ((dev= &rte_eth_devices[port_id]) == NULL) {
3052                 PMD_DEBUG_TRACE("Invalid port device\n");
3053                 return (-ENODEV);
3054         }
3055
3056         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP);
3057         (*dev->dev_ops->bypass_event_set)(dev, event, state);
3058         return 0;
3059 }
3060
3061 int
3062 rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout)
3063 {
3064         struct rte_eth_dev *dev;
3065
3066         if (!rte_eth_dev_is_valid_port(port_id)) {
3067                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3068                 return (-ENODEV);
3069         }
3070
3071         if ((dev= &rte_eth_devices[port_id]) == NULL) {
3072                 PMD_DEBUG_TRACE("Invalid port device\n");
3073                 return (-ENODEV);
3074         }
3075
3076         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP);
3077         (*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout);
3078         return 0;
3079 }
3080
3081 int
3082 rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver)
3083 {
3084         struct rte_eth_dev *dev;
3085
3086         if (!rte_eth_dev_is_valid_port(port_id)) {
3087                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3088                 return (-ENODEV);
3089         }
3090
3091         if ((dev= &rte_eth_devices[port_id]) == NULL) {
3092                 PMD_DEBUG_TRACE("Invalid port device\n");
3093                 return (-ENODEV);
3094         }
3095
3096         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP);
3097         (*dev->dev_ops->bypass_ver_show)(dev, ver);
3098         return 0;
3099 }
3100
3101 int
3102 rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
3103 {
3104         struct rte_eth_dev *dev;
3105
3106         if (!rte_eth_dev_is_valid_port(port_id)) {
3107                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3108                 return (-ENODEV);
3109         }
3110
3111         if ((dev= &rte_eth_devices[port_id]) == NULL) {
3112                 PMD_DEBUG_TRACE("Invalid port device\n");
3113                 return (-ENODEV);
3114         }
3115
3116         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP);
3117         (*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout);
3118         return 0;
3119 }
3120
3121 int
3122 rte_eth_dev_bypass_wd_reset(uint8_t port_id)
3123 {
3124         struct rte_eth_dev *dev;
3125
3126         if (!rte_eth_dev_is_valid_port(port_id)) {
3127                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3128                 return (-ENODEV);
3129         }
3130
3131         if ((dev= &rte_eth_devices[port_id]) == NULL) {
3132                 PMD_DEBUG_TRACE("Invalid port device\n");
3133                 return (-ENODEV);
3134         }
3135
3136         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP);
3137         (*dev->dev_ops->bypass_wd_reset)(dev);
3138         return 0;
3139 }
3140 #endif
3141
3142 int
3143 rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type)
3144 {
3145         struct rte_eth_dev *dev;
3146
3147         if (!rte_eth_dev_is_valid_port(port_id)) {
3148                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3149                 return -ENODEV;
3150         }
3151
3152         dev = &rte_eth_devices[port_id];
3153         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3154         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3155                                 RTE_ETH_FILTER_NOP, NULL);
3156 }
3157
3158 int
3159 rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
3160                        enum rte_filter_op filter_op, void *arg)
3161 {
3162         struct rte_eth_dev *dev;
3163
3164         if (!rte_eth_dev_is_valid_port(port_id)) {
3165                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3166                 return -ENODEV;
3167         }
3168
3169         dev = &rte_eth_devices[port_id];
3170         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3171         return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op, arg);
3172 }
3173
/*
 * Prepend a post-RX-burst callback on (@port_id, @queue_id).  Returns the
 * opaque callback handle (for rte_eth_remove_rx_callback) or NULL with
 * rte_errno set: ENOTSUP when callbacks are compiled out, EINVAL on bad
 * port/queue/fn, ENOMEM on allocation failure.
 */
void *
rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
		rte_rxtx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	/* Feature compiled out: everything below is dead code. */
	rte_errno = ENOTSUP;
	return NULL;
#endif
	/* check input parameters */
	if (port_id >= nb_ports || fn == NULL ||
		    queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
		rte_errno = EINVAL;
		return NULL;
	}

	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

	if (cb == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	cb->fn = fn;
	cb->param = user_param;
	/* NOTE(review): the list head is updated without any lock;
	 * presumably unsafe against concurrent add/remove on the same
	 * queue — confirm the intended threading model. */
	cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
	rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
	return cb;
}
3202
/*
 * Prepend a pre-TX-burst callback on (@port_id, @queue_id).  Returns the
 * opaque callback handle (for rte_eth_remove_tx_callback) or NULL with
 * rte_errno set: ENOTSUP when callbacks are compiled out, EINVAL on bad
 * port/queue/fn, ENOMEM on allocation failure.
 */
void *
rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
		rte_rxtx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	/* Feature compiled out: everything below is dead code. */
	rte_errno = ENOTSUP;
	return NULL;
#endif
	/* check input parameters */
	if (port_id >= nb_ports || fn == NULL ||
		    queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
		rte_errno = EINVAL;
		return NULL;
	}

	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

	if (cb == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	cb->fn = fn;
	cb->param = user_param;
	/* NOTE(review): list head updated without a lock; presumably
	 * unsafe against concurrent add/remove on the same queue —
	 * confirm the intended threading model. */
	cb->next = rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
	rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
	return cb;
}
3231
/*
 * Unlink @user_cb from the RX callback list of (@port_id, @queue_id).
 * Note: this only removes the entry from the list — it does NOT free it;
 * the handle's memory remains owned by the caller/framework.  Returns 0
 * on success, -ENOTSUP when callbacks are compiled out, -EINVAL on bad
 * parameters or when the callback is not found on that queue.
 */
int
rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
		struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	return (-ENOTSUP);
#endif
	/* Check input parameters. */
	if (port_id >= nb_ports || user_cb == NULL ||
		    queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
		return (-EINVAL);
	}

	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_eth_rxtx_callback *cb = dev->post_rx_burst_cbs[queue_id];
	struct rte_eth_rxtx_callback *prev_cb;

	/* Reset head pointer and remove user cb if first in the list. */
	if (cb == user_cb) {
		dev->post_rx_burst_cbs[queue_id] = user_cb->next;
		return 0;
	}

	/* Remove the user cb from the callback list. */
	do {
		prev_cb = cb;
		cb = cb->next;

		if (cb == user_cb) {
			/* Splice the entry out of the singly linked list. */
			prev_cb->next = user_cb->next;
			return 0;
		}

	} while (cb != NULL);

	/* Callback wasn't found. */
	return (-EINVAL);
}
3270
/*
 * Unlink @user_cb from the TX callback list of (@port_id, @queue_id).
 * Note: this only removes the entry from the list — it does NOT free it;
 * the handle's memory remains owned by the caller/framework.  Returns 0
 * on success, -ENOTSUP when callbacks are compiled out, -EINVAL on bad
 * parameters or when the callback is not found on that queue.
 */
int
rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
		struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	return (-ENOTSUP);
#endif
	/* Check input parameters. */
	if (port_id >= nb_ports || user_cb == NULL ||
		    queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
		return (-EINVAL);
	}

	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];
	struct rte_eth_rxtx_callback *prev_cb;

	/* Reset head pointer and remove user cb if first in the list. */
	if (cb == user_cb) {
		dev->pre_tx_burst_cbs[queue_id] = user_cb->next;
		return 0;
	}

	/* Remove the user cb from the callback list. */
	do {
		prev_cb = cb;
		cb = cb->next;

		if (cb == user_cb) {
			/* Splice the entry out of the singly linked list. */
			prev_cb->next = user_cb->next;
			return 0;
		}

	} while (cb != NULL);

	/* Callback wasn't found. */
	return (-EINVAL);
}