a26af7593b0ee97b2eaf6120d75aa127ca2cc669
[dpdk.git] / lib / librte_ether / rte_ethdev.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/types.h>
35 #include <sys/queue.h>
36 #include <ctype.h>
37 #include <stdio.h>
38 #include <stdlib.h>
39 #include <string.h>
40 #include <stdarg.h>
41 #include <errno.h>
42 #include <stdint.h>
43 #include <inttypes.h>
44 #include <netinet/in.h>
45
46 #include <rte_byteorder.h>
47 #include <rte_log.h>
48 #include <rte_debug.h>
49 #include <rte_interrupts.h>
50 #include <rte_pci.h>
51 #include <rte_memory.h>
52 #include <rte_memcpy.h>
53 #include <rte_memzone.h>
54 #include <rte_launch.h>
55 #include <rte_tailq.h>
56 #include <rte_eal.h>
57 #include <rte_per_lcore.h>
58 #include <rte_lcore.h>
59 #include <rte_atomic.h>
60 #include <rte_branch_prediction.h>
61 #include <rte_common.h>
62 #include <rte_ring.h>
63 #include <rte_mempool.h>
64 #include <rte_malloc.h>
65 #include <rte_mbuf.h>
66 #include <rte_errno.h>
67 #include <rte_spinlock.h>
68 #include <rte_string_fns.h>
69
70 #include "rte_ether.h"
71 #include "rte_ethdev.h"
72
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
/* Debug-build trace: logs an error prefixed with the calling function name. */
#define PMD_DEBUG_TRACE(fmt, args...) do {                        \
		RTE_LOG(ERR, PMD, "%s: " fmt, __func__, ## args); \
	} while (0)
#else
/* Compiled out entirely when ethdev debugging is disabled. */
#define PMD_DEBUG_TRACE(fmt, args...)
#endif

/* Macros for checking for restricting functions to primary instance only */
/* Return 'retval' from the enclosing function when called from a secondary
 * process; the guarded entry points may only run in the primary process. */
#define PROC_PRIMARY_OR_ERR_RET(retval) do { \
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
		PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
		return (retval); \
	} \
} while(0)
/* Same as PROC_PRIMARY_OR_ERR_RET, for void functions. */
#define PROC_PRIMARY_OR_RET() do { \
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
		PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
		return; \
	} \
} while(0)

/* Macros to check for invalid function pointers in dev_ops structure */
/* Return 'retval' when the PMD does not implement the given dev_ops hook. */
#define FUNC_PTR_OR_ERR_RET(func, retval) do { \
	if ((func) == NULL) { \
		PMD_DEBUG_TRACE("Function not supported\n"); \
		return (retval); \
	} \
} while(0)
/* Same as FUNC_PTR_OR_ERR_RET, for void functions. */
#define FUNC_PTR_OR_RET(func) do { \
	if ((func) == NULL) { \
		PMD_DEBUG_TRACE("Function not supported\n"); \
		return; \
	} \
} while(0)
108
/* Name of the memzone that holds per-port data shared between processes. */
static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
/* Per-port device structures, indexed by port id (process-local). */
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
/* Array of shared per-port data; lazily mapped by rte_eth_dev_data_alloc(). */
static struct rte_eth_dev_data *rte_eth_dev_data = NULL;
/* Number of ports currently attached (DEV_ATTACHED). */
static uint8_t nb_ports = 0;

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
116
/* store statistics names and its offset in stats structure  */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];	/* exported xstat name */
	unsigned offset;			/* byte offset in rte_eth_stats */
};

/* Port-level statistics exported through rte_eth_xstats_get(). */
static struct rte_eth_xstats_name_off rte_stats_strings[] = {
	 {"rx_packets", offsetof(struct rte_eth_stats, ipackets)},
	 {"tx_packets", offsetof(struct rte_eth_stats, opackets)},
	 {"rx_bytes", offsetof(struct rte_eth_stats, ibytes)},
	 {"tx_bytes", offsetof(struct rte_eth_stats, obytes)},
	 {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	 {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
	 {"rx_crc_errors", offsetof(struct rte_eth_stats, ibadcrc)},
	 {"rx_bad_length_errors", offsetof(struct rte_eth_stats, ibadlen)},
	 {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	 {"alloc_rx_buff_failed", offsetof(struct rte_eth_stats, rx_nombuf)},
	 {"fdir_match", offsetof(struct rte_eth_stats, fdirmatch)},
	 {"fdir_miss", offsetof(struct rte_eth_stats, fdirmiss)},
	 {"tx_flow_control_xon", offsetof(struct rte_eth_stats, tx_pause_xon)},
	 {"rx_flow_control_xon", offsetof(struct rte_eth_stats, rx_pause_xon)},
	 {"tx_flow_control_xoff", offsetof(struct rte_eth_stats, tx_pause_xoff)},
	 {"rx_flow_control_xoff", offsetof(struct rte_eth_stats, rx_pause_xoff)},
};
#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))

/* Per-RX-queue statistics; offsets index the q_* arrays by queue id. */
static struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
	{"rx_packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"rx_bytes", offsetof(struct rte_eth_stats, q_ibytes)},
};
#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
		sizeof(rte_rxq_stats_strings[0]))

/* Per-TX-queue statistics; offsets index the q_* arrays by queue id. */
static struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
	{"tx_packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"tx_bytes", offsetof(struct rte_eth_stats, q_obytes)},
	{"tx_errors", offsetof(struct rte_eth_stats, q_errors)},
};
#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
		sizeof(rte_txq_stats_strings[0]))
157
158
/**
 * The user application callback description.
 *
 * It contains callback address to be registered by user application,
 * the pointer to the parameters for callback, and the event type.
 * Entries live on a per-device TAILQ protected by rte_eth_dev_cb_lock.
 */
struct rte_eth_dev_callback {
	TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
	rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
	void *cb_arg;                           /**< Parameter for callback */
	enum rte_eth_event_type event;          /**< Interrupt event type */
	uint32_t active;                        /**< Callback is executing */
};
172
/* Direction selector for the per-queue statistics mapping. */
enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};

/* Values for rte_eth_dev.attached: whether a port slot is in use. */
enum {
	DEV_DETACHED = 0,
	DEV_ATTACHED
};
182
183 static inline void
184 rte_eth_dev_data_alloc(void)
185 {
186         const unsigned flags = 0;
187         const struct rte_memzone *mz;
188
189         if (rte_eal_process_type() == RTE_PROC_PRIMARY){
190                 mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
191                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
192                                 rte_socket_id(), flags);
193         } else
194                 mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
195         if (mz == NULL)
196                 rte_panic("Cannot allocate memzone for ethernet port data\n");
197
198         rte_eth_dev_data = mz->addr;
199         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
200                 memset(rte_eth_dev_data, 0,
201                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
202 }
203
204 static struct rte_eth_dev *
205 rte_eth_dev_allocated(const char *name)
206 {
207         unsigned i;
208
209         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
210                 if ((rte_eth_devices[i].attached == DEV_ATTACHED) &&
211                     strcmp(rte_eth_devices[i].data->name, name) == 0)
212                         return &rte_eth_devices[i];
213         }
214         return NULL;
215 }
216
217 static uint8_t
218 rte_eth_dev_find_free_port(void)
219 {
220         unsigned i;
221
222         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
223                 if (rte_eth_devices[i].attached == DEV_DETACHED)
224                         return i;
225         }
226         return RTE_MAX_ETHPORTS;
227 }
228
229 struct rte_eth_dev *
230 rte_eth_dev_allocate(const char *name)
231 {
232         uint8_t port_id;
233         struct rte_eth_dev *eth_dev;
234
235         port_id = rte_eth_dev_find_free_port();
236         if (port_id == RTE_MAX_ETHPORTS) {
237                 PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
238                 return NULL;
239         }
240
241         if (rte_eth_dev_data == NULL)
242                 rte_eth_dev_data_alloc();
243
244         if (rte_eth_dev_allocated(name) != NULL) {
245                 PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n", name);
246                 return NULL;
247         }
248
249         eth_dev = &rte_eth_devices[port_id];
250         eth_dev->data = &rte_eth_dev_data[port_id];
251         snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
252         eth_dev->data->port_id = port_id;
253         eth_dev->attached = DEV_ATTACHED;
254         nb_ports++;
255         return eth_dev;
256 }
257
258 static inline int
259 rte_eth_dev_create_unique_device_name(char *name, size_t size,
260                 struct rte_pci_device *pci_dev)
261 {
262         int ret;
263
264         if ((name == NULL) || (pci_dev == NULL))
265                 return -EINVAL;
266
267         ret = snprintf(name, size, "%d:%d.%d",
268                         pci_dev->addr.bus, pci_dev->addr.devid,
269                         pci_dev->addr.function);
270         if (ret < 0)
271                 return ret;
272
273         return 0;
274 }
275
276 int
277 rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
278 {
279         if (eth_dev == NULL)
280                 return -EINVAL;
281
282         eth_dev->attached = 0;
283         nb_ports--;
284         return 0;
285 }
286
287 static int
288 rte_eth_dev_init(struct rte_pci_driver *pci_drv,
289                  struct rte_pci_device *pci_dev)
290 {
291         struct eth_driver    *eth_drv;
292         struct rte_eth_dev *eth_dev;
293         char ethdev_name[RTE_ETH_NAME_MAX_LEN];
294
295         int diag;
296
297         eth_drv = (struct eth_driver *)pci_drv;
298
299         /* Create unique Ethernet device name using PCI address */
300         rte_eth_dev_create_unique_device_name(ethdev_name,
301                         sizeof(ethdev_name), pci_dev);
302
303         eth_dev = rte_eth_dev_allocate(ethdev_name);
304         if (eth_dev == NULL)
305                 return -ENOMEM;
306
307         if (rte_eal_process_type() == RTE_PROC_PRIMARY){
308                 eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
309                                   eth_drv->dev_private_size,
310                                   RTE_CACHE_LINE_SIZE);
311                 if (eth_dev->data->dev_private == NULL)
312                         rte_panic("Cannot allocate memzone for private port data\n");
313         }
314         eth_dev->pci_dev = pci_dev;
315         eth_dev->driver = eth_drv;
316         eth_dev->data->rx_mbuf_alloc_failed = 0;
317
318         /* init user callbacks */
319         TAILQ_INIT(&(eth_dev->link_intr_cbs));
320
321         /*
322          * Set the default MTU.
323          */
324         eth_dev->data->mtu = ETHER_MTU;
325
326         /* Invoke PMD device initialization function */
327         diag = (*eth_drv->eth_dev_init)(eth_drv, eth_dev);
328         if (diag == 0)
329                 return (0);
330
331         PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%u device_id=0x%x)"
332                         " failed\n", pci_drv->name,
333                         (unsigned) pci_dev->id.vendor_id,
334                         (unsigned) pci_dev->id.device_id);
335         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
336                 rte_free(eth_dev->data->dev_private);
337         eth_dev->attached = DEV_DETACHED;
338         nb_ports--;
339         return diag;
340 }
341
/*
 * PCI remove hook: detach the Ethernet device bound to 'pci_dev'.
 *
 * Rebuilds the unique name from the PCI address, finds the attached port,
 * runs the PMD's optional uninit hook, then releases the port slot and
 * (primary process only) the driver-private data.
 *
 * Returns 0 on success, -EINVAL on NULL argument, -ENODEV when no port
 * matches, or the PMD uninit hook's error code.
 */
static int
rte_eth_dev_uninit(struct rte_pci_device *pci_dev)
{
	const struct eth_driver *eth_drv;
	struct rte_eth_dev *eth_dev;
	char ethdev_name[RTE_ETH_NAME_MAX_LEN];
	int ret;

	if (pci_dev == NULL)
		return -EINVAL;

	/* Create unique Ethernet device name using PCI address */
	rte_eth_dev_create_unique_device_name(ethdev_name,
			sizeof(ethdev_name), pci_dev);

	eth_dev = rte_eth_dev_allocated(ethdev_name);
	if (eth_dev == NULL)
		return -ENODEV;

	eth_drv = (const struct eth_driver *)pci_dev->driver;

	/* Invoke PMD device uninit function (hook is optional; a PMD that
	 * leaves it NULL needs no per-device teardown) */
	if (*eth_drv->eth_dev_uninit) {
		ret = (*eth_drv->eth_dev_uninit)(eth_drv, eth_dev);
		if (ret)
			/* PMD refused/failed: leave the port attached */
			return ret;
	}

	/* free ether device */
	rte_eth_dev_release_port(eth_dev);

	/* dev_private was allocated by the primary in rte_eth_dev_init(),
	 * so only the primary frees it */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);

	/* clear stale pointers so a later re-probe starts clean */
	eth_dev->pci_dev = NULL;
	eth_dev->driver = NULL;
	eth_dev->data = NULL;

	return 0;
}
382
383 /**
384  * Register an Ethernet [Poll Mode] driver.
385  *
386  * Function invoked by the initialization function of an Ethernet driver
387  * to simultaneously register itself as a PCI driver and as an Ethernet
388  * Poll Mode Driver.
389  * Invokes the rte_eal_pci_register() function to register the *pci_drv*
390  * structure embedded in the *eth_drv* structure, after having stored the
391  * address of the rte_eth_dev_init() function in the *devinit* field of
392  * the *pci_drv* structure.
393  * During the PCI probing phase, the rte_eth_dev_init() function is
394  * invoked for each PCI [Ethernet device] matching the embedded PCI
395  * identifiers provided by the driver.
396  */
397 void
398 rte_eth_driver_register(struct eth_driver *eth_drv)
399 {
400         eth_drv->pci_drv.devinit = rte_eth_dev_init;
401         eth_drv->pci_drv.devuninit = rte_eth_dev_uninit;
402         rte_eal_pci_register(&eth_drv->pci_drv);
403 }
404
405 static int
406 rte_eth_dev_is_valid_port(uint8_t port_id)
407 {
408         if (port_id >= RTE_MAX_ETHPORTS ||
409             rte_eth_devices[port_id].attached != DEV_ATTACHED)
410                 return 0;
411         else
412                 return 1;
413 }
414
415 int
416 rte_eth_dev_socket_id(uint8_t port_id)
417 {
418         if (!rte_eth_dev_is_valid_port(port_id))
419                 return -1;
420         return rte_eth_devices[port_id].pci_dev->numa_node;
421 }
422
423 uint8_t
424 rte_eth_dev_count(void)
425 {
426         return (nb_ports);
427 }
428
429 static int
430 rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
431 {
432         uint16_t old_nb_queues = dev->data->nb_rx_queues;
433         void **rxq;
434         unsigned i;
435
436         if (dev->data->rx_queues == NULL) { /* first time configuration */
437                 dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
438                                 sizeof(dev->data->rx_queues[0]) * nb_queues,
439                                 RTE_CACHE_LINE_SIZE);
440                 if (dev->data->rx_queues == NULL) {
441                         dev->data->nb_rx_queues = 0;
442                         return -(ENOMEM);
443                 }
444 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
445                 dev->post_rx_burst_cbs = rte_zmalloc(
446                         "ethdev->post_rx_burst_cbs",
447                         sizeof(*dev->post_rx_burst_cbs) * nb_queues,
448                         RTE_CACHE_LINE_SIZE);
449                 if (dev->post_rx_burst_cbs == NULL) {
450                         rte_free(dev->data->rx_queues);
451                         dev->data->rx_queues = NULL;
452                         dev->data->nb_rx_queues = 0;
453                         return -ENOMEM;
454                 }
455 #endif
456
457         } else { /* re-configure */
458                 FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
459
460                 rxq = dev->data->rx_queues;
461
462                 for (i = nb_queues; i < old_nb_queues; i++)
463                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
464                 rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
465                                 RTE_CACHE_LINE_SIZE);
466                 if (rxq == NULL)
467                         return -(ENOMEM);
468 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
469                 dev->post_rx_burst_cbs = rte_realloc(
470                         dev->post_rx_burst_cbs,
471                         sizeof(*dev->post_rx_burst_cbs) *
472                                 nb_queues, RTE_CACHE_LINE_SIZE);
473                 if (dev->post_rx_burst_cbs == NULL)
474                         return -ENOMEM;
475 #endif
476                 if (nb_queues > old_nb_queues) {
477                         uint16_t new_qs = nb_queues - old_nb_queues;
478                         memset(rxq + old_nb_queues, 0,
479                                 sizeof(rxq[0]) * new_qs);
480 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
481                         memset(dev->post_rx_burst_cbs + old_nb_queues, 0,
482                                 sizeof(dev->post_rx_burst_cbs[0]) * new_qs);
483 #endif
484                 }
485
486                 dev->data->rx_queues = rxq;
487
488         }
489         dev->data->nb_rx_queues = nb_queues;
490         return (0);
491 }
492
493 int
494 rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
495 {
496         struct rte_eth_dev *dev;
497
498         /* This function is only safe when called from the primary process
499          * in a multi-process setup*/
500         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
501
502         if (!rte_eth_dev_is_valid_port(port_id)) {
503                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
504                 return -EINVAL;
505         }
506
507         dev = &rte_eth_devices[port_id];
508         if (rx_queue_id >= dev->data->nb_rx_queues) {
509                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
510                 return -EINVAL;
511         }
512
513         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
514
515         return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
516
517 }
518
519 int
520 rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
521 {
522         struct rte_eth_dev *dev;
523
524         /* This function is only safe when called from the primary process
525          * in a multi-process setup*/
526         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
527
528         if (!rte_eth_dev_is_valid_port(port_id)) {
529                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
530                 return -EINVAL;
531         }
532
533         dev = &rte_eth_devices[port_id];
534         if (rx_queue_id >= dev->data->nb_rx_queues) {
535                 PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
536                 return -EINVAL;
537         }
538
539         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
540
541         return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
542
543 }
544
545 int
546 rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
547 {
548         struct rte_eth_dev *dev;
549
550         /* This function is only safe when called from the primary process
551          * in a multi-process setup*/
552         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
553
554         if (!rte_eth_dev_is_valid_port(port_id)) {
555                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
556                 return -EINVAL;
557         }
558
559         dev = &rte_eth_devices[port_id];
560         if (tx_queue_id >= dev->data->nb_tx_queues) {
561                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
562                 return -EINVAL;
563         }
564
565         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
566
567         return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
568
569 }
570
571 int
572 rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
573 {
574         struct rte_eth_dev *dev;
575
576         /* This function is only safe when called from the primary process
577          * in a multi-process setup*/
578         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
579
580         if (!rte_eth_dev_is_valid_port(port_id)) {
581                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
582                 return -EINVAL;
583         }
584
585         dev = &rte_eth_devices[port_id];
586         if (tx_queue_id >= dev->data->nb_tx_queues) {
587                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
588                 return -EINVAL;
589         }
590
591         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
592
593         return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
594
595 }
596
597 static int
598 rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
599 {
600         uint16_t old_nb_queues = dev->data->nb_tx_queues;
601         void **txq;
602         unsigned i;
603
604         if (dev->data->tx_queues == NULL) { /* first time configuration */
605                 dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
606                                 sizeof(dev->data->tx_queues[0]) * nb_queues,
607                                 RTE_CACHE_LINE_SIZE);
608                 if (dev->data->tx_queues == NULL) {
609                         dev->data->nb_tx_queues = 0;
610                         return -(ENOMEM);
611                 }
612 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
613                 dev->pre_tx_burst_cbs = rte_zmalloc(
614                         "ethdev->pre_tx_burst_cbs",
615                         sizeof(*dev->pre_tx_burst_cbs) * nb_queues,
616                         RTE_CACHE_LINE_SIZE);
617                 if (dev->pre_tx_burst_cbs == NULL) {
618                         rte_free(dev->data->tx_queues);
619                         dev->data->tx_queues = NULL;
620                         dev->data->nb_tx_queues = 0;
621                         return -ENOMEM;
622                 }
623 #endif
624
625         } else { /* re-configure */
626                 FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
627
628                 txq = dev->data->tx_queues;
629
630                 for (i = nb_queues; i < old_nb_queues; i++)
631                         (*dev->dev_ops->tx_queue_release)(txq[i]);
632                 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
633                                 RTE_CACHE_LINE_SIZE);
634                 if (txq == NULL)
635                         return -ENOMEM;
636 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
637                 dev->pre_tx_burst_cbs = rte_realloc(
638                         dev->pre_tx_burst_cbs,
639                         sizeof(*dev->pre_tx_burst_cbs) *
640                                 nb_queues, RTE_CACHE_LINE_SIZE);
641                 if (dev->pre_tx_burst_cbs == NULL)
642                         return -ENOMEM;
643 #endif
644                 if (nb_queues > old_nb_queues) {
645                         uint16_t new_qs = nb_queues - old_nb_queues;
646                         memset(txq + old_nb_queues, 0,
647                                 sizeof(txq[0]) * new_qs);
648 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
649                         memset(dev->pre_tx_burst_cbs + old_nb_queues, 0,
650                                 sizeof(dev->pre_tx_burst_cbs[0]) * new_qs);
651 #endif
652                 }
653
654                 dev->data->tx_queues = txq;
655
656         }
657         dev->data->nb_tx_queues = nb_queues;
658         return (0);
659 }
660
661 static int
662 rte_eth_dev_check_vf_rss_rxq_num(uint8_t port_id, uint16_t nb_rx_q)
663 {
664         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
665         switch (nb_rx_q) {
666         case 1:
667         case 2:
668                 RTE_ETH_DEV_SRIOV(dev).active =
669                         ETH_64_POOLS;
670                 break;
671         case 4:
672                 RTE_ETH_DEV_SRIOV(dev).active =
673                         ETH_32_POOLS;
674                 break;
675         default:
676                 return -EINVAL;
677         }
678
679         RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q;
680         RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
681                 dev->pci_dev->max_vfs * nb_rx_q;
682
683         return 0;
684 }
685
686 static int
687 rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
688                       const struct rte_eth_conf *dev_conf)
689 {
690         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
691
692         if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
693                 /* check multi-queue mode */
694                 if ((dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) ||
695                     (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB_RSS) ||
696                     (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB)) {
697                         /* SRIOV only works in VMDq enable mode */
698                         PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
699                                         " SRIOV active, "
700                                         "wrong VMDQ mq_mode rx %u tx %u\n",
701                                         port_id,
702                                         dev_conf->rxmode.mq_mode,
703                                         dev_conf->txmode.mq_mode);
704                         return (-EINVAL);
705                 }
706
707                 switch (dev_conf->rxmode.mq_mode) {
708                 case ETH_MQ_RX_VMDQ_DCB:
709                 case ETH_MQ_RX_VMDQ_DCB_RSS:
710                         /* DCB/RSS VMDQ in SRIOV mode, not implement yet */
711                         PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
712                                         " SRIOV active, "
713                                         "unsupported VMDQ mq_mode rx %u\n",
714                                         port_id, dev_conf->rxmode.mq_mode);
715                         return (-EINVAL);
716                 case ETH_MQ_RX_RSS:
717                         PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
718                                         " SRIOV active, "
719                                         "Rx mq mode is changed from:"
720                                         "mq_mode %u into VMDQ mq_mode %u\n",
721                                         port_id,
722                                         dev_conf->rxmode.mq_mode,
723                                         dev->data->dev_conf.rxmode.mq_mode);
724                 case ETH_MQ_RX_VMDQ_RSS:
725                         dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
726                         if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
727                                 if (rte_eth_dev_check_vf_rss_rxq_num(port_id, nb_rx_q) != 0) {
728                                         PMD_DEBUG_TRACE("ethdev port_id=%d"
729                                                 " SRIOV active, invalid queue"
730                                                 " number for VMDQ RSS, allowed"
731                                                 " value are 1, 2 or 4\n",
732                                                 port_id);
733                                         return -EINVAL;
734                                 }
735                         break;
736                 default: /* ETH_MQ_RX_VMDQ_ONLY or ETH_MQ_RX_NONE */
737                         /* if nothing mq mode configure, use default scheme */
738                         dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
739                         if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
740                                 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
741                         break;
742                 }
743
744                 switch (dev_conf->txmode.mq_mode) {
745                 case ETH_MQ_TX_VMDQ_DCB:
746                         /* DCB VMDQ in SRIOV mode, not implement yet */
747                         PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
748                                         " SRIOV active, "
749                                         "unsupported VMDQ mq_mode tx %u\n",
750                                         port_id, dev_conf->txmode.mq_mode);
751                         return (-EINVAL);
752                 default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
753                         /* if nothing mq mode configure, use default scheme */
754                         dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
755                         break;
756                 }
757
758                 /* check valid queue number */
759                 if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
760                     (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
761                         PMD_DEBUG_TRACE("ethdev port_id=%d SRIOV active, "
762                                     "queue number must less equal to %d\n",
763                                         port_id, RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
764                         return (-EINVAL);
765                 }
766         } else {
767                 /* For vmdb+dcb mode check our configuration before we go further */
768                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
769                         const struct rte_eth_vmdq_dcb_conf *conf;
770
771                         if (nb_rx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
772                                 PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_rx_q "
773                                                 "!= %d\n",
774                                                 port_id, ETH_VMDQ_DCB_NUM_QUEUES);
775                                 return (-EINVAL);
776                         }
777                         conf = &(dev_conf->rx_adv_conf.vmdq_dcb_conf);
778                         if (! (conf->nb_queue_pools == ETH_16_POOLS ||
779                                conf->nb_queue_pools == ETH_32_POOLS)) {
780                                 PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
781                                                 "nb_queue_pools must be %d or %d\n",
782                                                 port_id, ETH_16_POOLS, ETH_32_POOLS);
783                                 return (-EINVAL);
784                         }
785                 }
786                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
787                         const struct rte_eth_vmdq_dcb_tx_conf *conf;
788
789                         if (nb_tx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
790                                 PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_tx_q "
791                                                 "!= %d\n",
792                                                 port_id, ETH_VMDQ_DCB_NUM_QUEUES);
793                                 return (-EINVAL);
794                         }
795                         conf = &(dev_conf->tx_adv_conf.vmdq_dcb_tx_conf);
796                         if (! (conf->nb_queue_pools == ETH_16_POOLS ||
797                                conf->nb_queue_pools == ETH_32_POOLS)) {
798                                 PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
799                                                 "nb_queue_pools != %d or nb_queue_pools "
800                                                 "!= %d\n",
801                                                 port_id, ETH_16_POOLS, ETH_32_POOLS);
802                                 return (-EINVAL);
803                         }
804                 }
805
806                 /* For DCB mode check our configuration before we go further */
807                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
808                         const struct rte_eth_dcb_rx_conf *conf;
809
810                         if (nb_rx_q != ETH_DCB_NUM_QUEUES) {
811                                 PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_rx_q "
812                                                 "!= %d\n",
813                                                 port_id, ETH_DCB_NUM_QUEUES);
814                                 return (-EINVAL);
815                         }
816                         conf = &(dev_conf->rx_adv_conf.dcb_rx_conf);
817                         if (! (conf->nb_tcs == ETH_4_TCS ||
818                                conf->nb_tcs == ETH_8_TCS)) {
819                                 PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
820                                                 "nb_tcs != %d or nb_tcs "
821                                                 "!= %d\n",
822                                                 port_id, ETH_4_TCS, ETH_8_TCS);
823                                 return (-EINVAL);
824                         }
825                 }
826
827                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
828                         const struct rte_eth_dcb_tx_conf *conf;
829
830                         if (nb_tx_q != ETH_DCB_NUM_QUEUES) {
831                                 PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_tx_q "
832                                                 "!= %d\n",
833                                                 port_id, ETH_DCB_NUM_QUEUES);
834                                 return (-EINVAL);
835                         }
836                         conf = &(dev_conf->tx_adv_conf.dcb_tx_conf);
837                         if (! (conf->nb_tcs == ETH_4_TCS ||
838                                conf->nb_tcs == ETH_8_TCS)) {
839                                 PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
840                                                 "nb_tcs != %d or nb_tcs "
841                                                 "!= %d\n",
842                                                 port_id, ETH_4_TCS, ETH_8_TCS);
843                                 return (-EINVAL);
844                         }
845                 }
846         }
847         return 0;
848 }
849
/*
 * Configure an Ethernet device: validate the requested RX/TX queue
 * counts against the device capabilities, store the supplied
 * configuration, run the multi-queue mode checks and finally invoke
 * the PMD's dev_configure callback.  The port must be stopped and the
 * caller must be the primary process.
 *
 * Returns 0 on success, a negative errno-style value on failure.
 */
int
rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	int diag;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup*/
	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

	if (!rte_eth_dev_is_valid_port(port_id)) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-EINVAL);
	}

	dev = &rte_eth_devices[port_id];

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	/* Reconfiguring a running port is not allowed. */
	if (dev->data->dev_started) {
		PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n", port_id);
		return (-EBUSY);
	}

	/*
	 * Check that the numbers of RX and TX queues are not greater
	 * than the maximum number of RX and TX queues supported by the
	 * configured device.  Zero queues in either direction is also
	 * rejected.
	 */
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);
	if (nb_rx_q > dev_info.max_rx_queues) {
		PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
				port_id, nb_rx_q, dev_info.max_rx_queues);
		return (-EINVAL);
	}
	if (nb_rx_q == 0) {
		PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_q == 0\n", port_id);
		return (-EINVAL);
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
				port_id, nb_tx_q, dev_info.max_tx_queues);
		return (-EINVAL);
	}
	if (nb_tx_q == 0) {
		PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_q == 0\n", port_id);
		return (-EINVAL);
	}

	/* Copy the dev_conf parameter into the dev structure */
	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

	/*
	 * If link state interrupt is enabled, check that the
	 * device supports it.
	 */
	if (dev_conf->intr_conf.lsc == 1) {
		const struct rte_pci_driver *pci_drv = &dev->driver->pci_drv;

		if (!(pci_drv->drv_flags & RTE_PCI_DRV_INTR_LSC)) {
			PMD_DEBUG_TRACE("driver %s does not support lsc\n",
					pci_drv->name);
			return (-EINVAL);
		}
	}

	/*
	 * If jumbo frames are enabled, check that the maximum RX packet
	 * length is supported by the configured device.
	 */
	if (dev_conf->rxmode.jumbo_frame == 1) {
		if (dev_conf->rxmode.max_rx_pkt_len >
		    dev_info.max_rx_pktlen) {
			PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
				" > max valid value %u\n",
				port_id,
				(unsigned)dev_conf->rxmode.max_rx_pkt_len,
				(unsigned)dev_info.max_rx_pktlen);
			return (-EINVAL);
		}
		else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
			PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
				" < min valid value %u\n",
				port_id,
				(unsigned)dev_conf->rxmode.max_rx_pkt_len,
				(unsigned)ETHER_MIN_LEN);
			return (-EINVAL);
		}
	} else {
		/* Jumbo disabled: an out-of-range length is not an error,
		 * the stored configuration silently falls back to the
		 * standard maximum Ethernet frame length. */
		if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
			dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
			/* Use default value */
			dev->data->dev_conf.rxmode.max_rx_pkt_len =
							ETHER_MAX_LEN;
	}

	/* multiple queue mode checking */
	diag = rte_eth_dev_check_mq_mode(port_id, nb_rx_q, nb_tx_q, dev_conf);
	if (diag != 0) {
		PMD_DEBUG_TRACE("port%d rte_eth_dev_check_mq_mode = %d\n",
				port_id, diag);
		return diag;
	}

	/*
	 * Setup new number of RX/TX queues and reconfigure device.
	 * On failure of a later step, the earlier queue allocations are
	 * rolled back by re-configuring with zero queues.
	 */
	diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
				port_id, diag);
		return diag;
	}

	diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
				port_id, diag);
		/* undo the RX queue setup done just above */
		rte_eth_dev_rx_queue_config(dev, 0);
		return diag;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
				port_id, diag);
		/* undo both queue array allocations on PMD failure */
		rte_eth_dev_rx_queue_config(dev, 0);
		rte_eth_dev_tx_queue_config(dev, 0);
		return diag;
	}

	return 0;
}
988
/*
 * Replay the software-cached configuration (MAC addresses,
 * promiscuous and all-multicast settings) back into the hardware.
 * Called from rte_eth_dev_start() so that the device state matches
 * what the application configured before the (re)start.
 */
static void
rte_eth_dev_config_restore(uint8_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct ether_addr addr;
	uint16_t i;
	uint32_t pool = 0;

	dev = &rte_eth_devices[port_id];

	rte_eth_dev_info_get(port_id, &dev_info);

	/* With SRIOV active, addresses are replayed into the default
	 * VMDQ pool instead of pool 0. */
	if (RTE_ETH_DEV_SRIOV(dev).active)
		pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx;

	/* replay MAC address configuration */
	for (i = 0; i < dev_info.max_mac_addrs; i++) {
		addr = dev->data->mac_addrs[i];

		/* skip zero address */
		if (is_zero_ether_addr(&addr))
			continue;

		/* add address to the hardware, but only if the address was
		 * selected for this pool (mac_pool_sel bitmask) */
		if  (*dev->dev_ops->mac_addr_add &&
			(dev->data->mac_pool_sel[i] & (1ULL << pool)))
			(*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool);
		else {
			PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
					port_id);
			/* exit the loop but not return an error */
			break;
		}
	}

	/* replay promiscuous configuration */
	if (rte_eth_promiscuous_get(port_id) == 1)
		rte_eth_promiscuous_enable(port_id);
	else if (rte_eth_promiscuous_get(port_id) == 0)
		rte_eth_promiscuous_disable(port_id);

	/* replay allmulticast configuration */
	if (rte_eth_allmulticast_get(port_id) == 1)
		rte_eth_allmulticast_enable(port_id);
	else if (rte_eth_allmulticast_get(port_id) == 0)
		rte_eth_allmulticast_disable(port_id);
}
1037
1038 int
1039 rte_eth_dev_start(uint8_t port_id)
1040 {
1041         struct rte_eth_dev *dev;
1042         int diag;
1043
1044         /* This function is only safe when called from the primary process
1045          * in a multi-process setup*/
1046         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
1047
1048         if (!rte_eth_dev_is_valid_port(port_id)) {
1049                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1050                 return (-EINVAL);
1051         }
1052
1053         dev = &rte_eth_devices[port_id];
1054
1055         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1056
1057         if (dev->data->dev_started != 0) {
1058                 PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
1059                         " already started\n",
1060                         port_id);
1061                 return (0);
1062         }
1063
1064         diag = (*dev->dev_ops->dev_start)(dev);
1065         if (diag == 0)
1066                 dev->data->dev_started = 1;
1067         else
1068                 return diag;
1069
1070         rte_eth_dev_config_restore(port_id);
1071
1072         if (dev->data->dev_conf.intr_conf.lsc != 0) {
1073                 FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
1074                 (*dev->dev_ops->link_update)(dev, 0);
1075         }
1076         return 0;
1077 }
1078
1079 void
1080 rte_eth_dev_stop(uint8_t port_id)
1081 {
1082         struct rte_eth_dev *dev;
1083
1084         /* This function is only safe when called from the primary process
1085          * in a multi-process setup*/
1086         PROC_PRIMARY_OR_RET();
1087
1088         if (!rte_eth_dev_is_valid_port(port_id)) {
1089                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1090                 return;
1091         }
1092
1093         dev = &rte_eth_devices[port_id];
1094
1095         FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1096
1097         if (dev->data->dev_started == 0) {
1098                 PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
1099                         " already stopped\n",
1100                         port_id);
1101                 return;
1102         }
1103
1104         dev->data->dev_started = 0;
1105         (*dev->dev_ops->dev_stop)(dev);
1106 }
1107
1108 int
1109 rte_eth_dev_set_link_up(uint8_t port_id)
1110 {
1111         struct rte_eth_dev *dev;
1112
1113         /* This function is only safe when called from the primary process
1114          * in a multi-process setup*/
1115         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
1116
1117         if (!rte_eth_dev_is_valid_port(port_id)) {
1118                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1119                 return -EINVAL;
1120         }
1121
1122         dev = &rte_eth_devices[port_id];
1123
1124         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1125         return (*dev->dev_ops->dev_set_link_up)(dev);
1126 }
1127
1128 int
1129 rte_eth_dev_set_link_down(uint8_t port_id)
1130 {
1131         struct rte_eth_dev *dev;
1132
1133         /* This function is only safe when called from the primary process
1134          * in a multi-process setup*/
1135         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
1136
1137         if (!rte_eth_dev_is_valid_port(port_id)) {
1138                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1139                 return -EINVAL;
1140         }
1141
1142         dev = &rte_eth_devices[port_id];
1143
1144         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1145         return (*dev->dev_ops->dev_set_link_down)(dev);
1146 }
1147
1148 void
1149 rte_eth_dev_close(uint8_t port_id)
1150 {
1151         struct rte_eth_dev *dev;
1152
1153         /* This function is only safe when called from the primary process
1154          * in a multi-process setup*/
1155         PROC_PRIMARY_OR_RET();
1156
1157         if (!rte_eth_dev_is_valid_port(port_id)) {
1158                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1159                 return;
1160         }
1161
1162         dev = &rte_eth_devices[port_id];
1163
1164         FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
1165         dev->data->dev_started = 0;
1166         (*dev->dev_ops->dev_close)(dev);
1167 }
1168
/*
 * Configure one RX queue of a stopped port: validate the queue index,
 * check that the mbuf pool's buffers are large enough for the device,
 * then invoke the PMD's rx_queue_setup callback.  A NULL rx_conf
 * selects the driver-reported default RX configuration.
 *
 * Returns 0 on success, a negative errno-style value on failure.
 */
int
rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
		       uint16_t nb_rx_desc, unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf,
		       struct rte_mempool *mp)
{
	int ret;
	uint32_t mbp_buf_size;
	struct rte_eth_dev *dev;
	struct rte_pktmbuf_pool_private *mbp_priv;
	struct rte_eth_dev_info dev_info;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup*/
	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

	if (!rte_eth_dev_is_valid_port(port_id)) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-EINVAL);
	}

	/* The queue index must be within the count set at configure time. */
	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
		return (-EINVAL);
	}

	if (dev->data->dev_started) {
		PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n", port_id);
		return -EBUSY;
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);

	/*
	 * Check the size of the mbuf data buffer.
	 * This value must be provided in the private data of the memory pool.
	 * First check that the memory pool has a valid private data.
	 */
	rte_eth_dev_info_get(port_id, &dev_info);
	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
		PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
				mp->name, (int) mp->private_data_size,
				(int) sizeof(struct rte_pktmbuf_pool_private));
		return (-ENOSPC);
	}
	mbp_priv = rte_mempool_get_priv(mp);
	mbp_buf_size = mbp_priv->mbuf_data_room_size;

	/* The usable part of each mbuf (data room minus headroom) must
	 * hold at least the device's minimum RX buffer size. */
	if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
		PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
				"(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
				"=%d)\n",
				mp->name,
				(int)mbp_buf_size,
				(int)(RTE_PKTMBUF_HEADROOM +
				      dev_info.min_rx_bufsize),
				(int)RTE_PKTMBUF_HEADROOM,
				(int)dev_info.min_rx_bufsize);
		return (-EINVAL);
	}

	/* NULL rx_conf means: use the driver-provided defaults. */
	if (rx_conf == NULL)
		rx_conf = &dev_info.default_rxconf;

	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
					      socket_id, rx_conf, mp);
	if (!ret) {
		/* On success, track the smallest mbuf buffer size seen
		 * across all configured RX queues of this device. */
		if (!dev->data->min_rx_buf_size ||
		    dev->data->min_rx_buf_size > mbp_buf_size)
			dev->data->min_rx_buf_size = mbp_buf_size;
	}

	return ret;
}
1246
1247 int
1248 rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
1249                        uint16_t nb_tx_desc, unsigned int socket_id,
1250                        const struct rte_eth_txconf *tx_conf)
1251 {
1252         struct rte_eth_dev *dev;
1253         struct rte_eth_dev_info dev_info;
1254
1255         /* This function is only safe when called from the primary process
1256          * in a multi-process setup*/
1257         PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
1258
1259         if (!rte_eth_dev_is_valid_port(port_id)) {
1260                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1261                 return (-EINVAL);
1262         }
1263
1264         dev = &rte_eth_devices[port_id];
1265         if (tx_queue_id >= dev->data->nb_tx_queues) {
1266                 PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
1267                 return (-EINVAL);
1268         }
1269
1270         if (dev->data->dev_started) {
1271                 PMD_DEBUG_TRACE(
1272                     "port %d must be stopped to allow configuration\n", port_id);
1273                 return -EBUSY;
1274         }
1275
1276         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1277         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
1278
1279         rte_eth_dev_info_get(port_id, &dev_info);
1280
1281         if (tx_conf == NULL)
1282                 tx_conf = &dev_info.default_txconf;
1283
1284         return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
1285                                                socket_id, tx_conf);
1286 }
1287
1288 void
1289 rte_eth_promiscuous_enable(uint8_t port_id)
1290 {
1291         struct rte_eth_dev *dev;
1292
1293         if (!rte_eth_dev_is_valid_port(port_id)) {
1294                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1295                 return;
1296         }
1297
1298         dev = &rte_eth_devices[port_id];
1299
1300         FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
1301         (*dev->dev_ops->promiscuous_enable)(dev);
1302         dev->data->promiscuous = 1;
1303 }
1304
1305 void
1306 rte_eth_promiscuous_disable(uint8_t port_id)
1307 {
1308         struct rte_eth_dev *dev;
1309
1310         if (!rte_eth_dev_is_valid_port(port_id)) {
1311                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1312                 return;
1313         }
1314
1315         dev = &rte_eth_devices[port_id];
1316
1317         FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
1318         dev->data->promiscuous = 0;
1319         (*dev->dev_ops->promiscuous_disable)(dev);
1320 }
1321
1322 int
1323 rte_eth_promiscuous_get(uint8_t port_id)
1324 {
1325         struct rte_eth_dev *dev;
1326
1327         if (!rte_eth_dev_is_valid_port(port_id)) {
1328                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1329                 return -1;
1330         }
1331
1332         dev = &rte_eth_devices[port_id];
1333         return dev->data->promiscuous;
1334 }
1335
1336 void
1337 rte_eth_allmulticast_enable(uint8_t port_id)
1338 {
1339         struct rte_eth_dev *dev;
1340
1341         if (!rte_eth_dev_is_valid_port(port_id)) {
1342                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1343                 return;
1344         }
1345
1346         dev = &rte_eth_devices[port_id];
1347
1348         FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
1349         (*dev->dev_ops->allmulticast_enable)(dev);
1350         dev->data->all_multicast = 1;
1351 }
1352
1353 void
1354 rte_eth_allmulticast_disable(uint8_t port_id)
1355 {
1356         struct rte_eth_dev *dev;
1357
1358         if (!rte_eth_dev_is_valid_port(port_id)) {
1359                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1360                 return;
1361         }
1362
1363         dev = &rte_eth_devices[port_id];
1364
1365         FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
1366         dev->data->all_multicast = 0;
1367         (*dev->dev_ops->allmulticast_disable)(dev);
1368 }
1369
1370 int
1371 rte_eth_allmulticast_get(uint8_t port_id)
1372 {
1373         struct rte_eth_dev *dev;
1374
1375         if (!rte_eth_dev_is_valid_port(port_id)) {
1376                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1377                 return -1;
1378         }
1379
1380         dev = &rte_eth_devices[port_id];
1381         return dev->data->all_multicast;
1382 }
1383
/*
 * Copy the device's link status (dev->data->dev_link) into *link as
 * a single 64-bit atomic operation, so the caller never observes a
 * torn/partially-updated link structure.
 *
 * NOTE(review): this relies on struct rte_eth_link occupying exactly
 * 64 bits — confirm against the structure definition in the header.
 *
 * Returns 0 on success, -1 when the compare-and-set fails (i.e. a
 * concurrent writer modified *dst between the read of its old value
 * and the swap).
 */
static inline int
rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	/* cmpset(dst, *dst, *src): atomically store *src into *dst if
	 * *dst still holds the value just read from it. */
	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
1397
1398 void
1399 rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
1400 {
1401         struct rte_eth_dev *dev;
1402
1403         if (!rte_eth_dev_is_valid_port(port_id)) {
1404                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1405                 return;
1406         }
1407
1408         dev = &rte_eth_devices[port_id];
1409
1410         if (dev->data->dev_conf.intr_conf.lsc != 0)
1411                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1412         else {
1413                 FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1414                 (*dev->dev_ops->link_update)(dev, 1);
1415                 *eth_link = dev->data->dev_link;
1416         }
1417 }
1418
1419 void
1420 rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
1421 {
1422         struct rte_eth_dev *dev;
1423
1424         if (!rte_eth_dev_is_valid_port(port_id)) {
1425                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1426                 return;
1427         }
1428
1429         dev = &rte_eth_devices[port_id];
1430
1431         if (dev->data->dev_conf.intr_conf.lsc != 0)
1432                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1433         else {
1434                 FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1435                 (*dev->dev_ops->link_update)(dev, 0);
1436                 *eth_link = dev->data->dev_link;
1437         }
1438 }
1439
1440 int
1441 rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
1442 {
1443         struct rte_eth_dev *dev;
1444
1445         if (!rte_eth_dev_is_valid_port(port_id)) {
1446                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1447                 return (-ENODEV);
1448         }
1449
1450         dev = &rte_eth_devices[port_id];
1451         memset(stats, 0, sizeof(*stats));
1452
1453         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1454         (*dev->dev_ops->stats_get)(dev, stats);
1455         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1456         return 0;
1457 }
1458
1459 void
1460 rte_eth_stats_reset(uint8_t port_id)
1461 {
1462         struct rte_eth_dev *dev;
1463
1464         if (!rte_eth_dev_is_valid_port(port_id)) {
1465                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1466                 return;
1467         }
1468
1469         dev = &rte_eth_devices[port_id];
1470
1471         FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
1472         (*dev->dev_ops->stats_reset)(dev);
1473 }
1474
/* retrieve ethdev extended statistics */
/*
 * Fill xstats[] with up to n extended statistics entries.  If the PMD
 * implements xstats_get, delegate to it; otherwise synthesize entries
 * from the generic stats tables (rte_stats_strings and the per-queue
 * tables), which record a name and a byte offset into
 * struct rte_eth_stats for each counter.
 *
 * When n is smaller than the number of available entries, the required
 * count is returned and nothing is written.  Otherwise returns the
 * number of entries written; returns -1 for an invalid port.
 */
int
rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstats *xstats,
	unsigned n)
{
	struct rte_eth_stats eth_stats;
	struct rte_eth_dev *dev;
	unsigned count, i, q;
	uint64_t val;
	char *stats_ptr;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return -1;
	}

	dev = &rte_eth_devices[port_id];

	/* implemented by the driver */
	if (dev->dev_ops->xstats_get != NULL)
		return (*dev->dev_ops->xstats_get)(dev, xstats, n);

	/* else, return generic statistics */
	count = RTE_NB_STATS;
	count += dev->data->nb_rx_queues * RTE_NB_RXQ_STATS;
	count += dev->data->nb_tx_queues * RTE_NB_TXQ_STATS;
	/* caller's array too small: report required size, write nothing */
	if (n < count)
		return count;

	/* now fill the xstats structure */

	count = 0;
	memset(&eth_stats, 0, sizeof(eth_stats));
	rte_eth_stats_get(port_id, &eth_stats);

	/* global stats: read each counter at its table-recorded byte
	 * offset into eth_stats */
	for (i = 0; i < RTE_NB_STATS; i++) {
		stats_ptr = (char *)&eth_stats + rte_stats_strings[i].offset;
		val = *(uint64_t *)stats_ptr;
		snprintf(xstats[count].name, sizeof(xstats[count].name),
			"%s", rte_stats_strings[i].name);
		xstats[count++].value = val;
	}

	/* per-rxq stats: the per-queue counters are arrays of uint64_t,
	 * so queue q lives q * sizeof(uint64_t) past the table offset */
	for (q = 0; q < dev->data->nb_rx_queues; q++) {
		for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
			stats_ptr = (char *)&eth_stats;
			stats_ptr += rte_rxq_stats_strings[i].offset;
			stats_ptr += q * sizeof(uint64_t);
			val = *(uint64_t *)stats_ptr;
			snprintf(xstats[count].name, sizeof(xstats[count].name),
				"rx_queue_%u_%s", q,
				rte_rxq_stats_strings[i].name);
			xstats[count++].value = val;
		}
	}

	/* per-txq stats: same layout as the per-rxq counters */
	for (q = 0; q < dev->data->nb_tx_queues; q++) {
		for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
			stats_ptr = (char *)&eth_stats;
			stats_ptr += rte_txq_stats_strings[i].offset;
			stats_ptr += q * sizeof(uint64_t);
			val = *(uint64_t *)stats_ptr;
			snprintf(xstats[count].name, sizeof(xstats[count].name),
				"tx_queue_%u_%s", q,
				rte_txq_stats_strings[i].name);
			xstats[count++].value = val;
		}
	}

	return count;
}
1549
1550 /* reset ethdev extended statistics */
1551 void
1552 rte_eth_xstats_reset(uint8_t port_id)
1553 {
1554         struct rte_eth_dev *dev;
1555
1556         if (!rte_eth_dev_is_valid_port(port_id)) {
1557                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1558                 return;
1559         }
1560
1561         dev = &rte_eth_devices[port_id];
1562
1563         /* implemented by the driver */
1564         if (dev->dev_ops->xstats_reset != NULL) {
1565                 (*dev->dev_ops->xstats_reset)(dev);
1566                 return;
1567         }
1568
1569         /* fallback to default */
1570         rte_eth_stats_reset(port_id);
1571 }
1572
1573 static int
1574 set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
1575                 uint8_t is_rx)
1576 {
1577         struct rte_eth_dev *dev;
1578
1579         if (!rte_eth_dev_is_valid_port(port_id)) {
1580                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1581                 return -ENODEV;
1582         }
1583
1584         dev = &rte_eth_devices[port_id];
1585
1586         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
1587         return (*dev->dev_ops->queue_stats_mapping_set)
1588                         (dev, queue_id, stat_idx, is_rx);
1589 }
1590
1591
1592 int
1593 rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
1594                 uint8_t stat_idx)
1595 {
1596         return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
1597                         STAT_QMAP_TX);
1598 }
1599
1600
1601 int
1602 rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
1603                 uint8_t stat_idx)
1604 {
1605         return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
1606                         STAT_QMAP_RX);
1607 }
1608
1609
1610 void
1611 rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
1612 {
1613         struct rte_eth_dev *dev;
1614
1615         if (!rte_eth_dev_is_valid_port(port_id)) {
1616                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1617                 return;
1618         }
1619
1620         dev = &rte_eth_devices[port_id];
1621
1622         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
1623
1624         FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
1625         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
1626         dev_info->pci_dev = dev->pci_dev;
1627         if (dev->driver)
1628                 dev_info->driver_name = dev->driver->pci_drv.name;
1629 }
1630
1631 void
1632 rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
1633 {
1634         struct rte_eth_dev *dev;
1635
1636         if (!rte_eth_dev_is_valid_port(port_id)) {
1637                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1638                 return;
1639         }
1640
1641         dev = &rte_eth_devices[port_id];
1642         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
1643 }
1644
1645
1646 int
1647 rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu)
1648 {
1649         struct rte_eth_dev *dev;
1650
1651         if (!rte_eth_dev_is_valid_port(port_id)) {
1652                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1653                 return (-ENODEV);
1654         }
1655
1656         dev = &rte_eth_devices[port_id];
1657         *mtu = dev->data->mtu;
1658         return 0;
1659 }
1660
1661 int
1662 rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu)
1663 {
1664         int ret;
1665         struct rte_eth_dev *dev;
1666
1667         if (!rte_eth_dev_is_valid_port(port_id)) {
1668                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1669                 return (-ENODEV);
1670         }
1671
1672         dev = &rte_eth_devices[port_id];
1673         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
1674
1675         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
1676         if (!ret)
1677                 dev->data->mtu = mtu;
1678
1679         return ret;
1680 }
1681
1682 int
1683 rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
1684 {
1685         struct rte_eth_dev *dev;
1686
1687         if (!rte_eth_dev_is_valid_port(port_id)) {
1688                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1689                 return (-ENODEV);
1690         }
1691
1692         dev = &rte_eth_devices[port_id];
1693         if (! (dev->data->dev_conf.rxmode.hw_vlan_filter)) {
1694                 PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
1695                 return (-ENOSYS);
1696         }
1697
1698         if (vlan_id > 4095) {
1699                 PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
1700                                 port_id, (unsigned) vlan_id);
1701                 return (-EINVAL);
1702         }
1703         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
1704         (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
1705         return (0);
1706 }
1707
1708 int
1709 rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on)
1710 {
1711         struct rte_eth_dev *dev;
1712
1713         if (!rte_eth_dev_is_valid_port(port_id)) {
1714                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1715                 return (-ENODEV);
1716         }
1717
1718         dev = &rte_eth_devices[port_id];
1719         if (rx_queue_id >= dev->data->nb_rx_queues) {
1720                 PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", port_id);
1721                 return (-EINVAL);
1722         }
1723
1724         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
1725         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
1726
1727         return (0);
1728 }
1729
1730 int
1731 rte_eth_dev_set_vlan_ether_type(uint8_t port_id, uint16_t tpid)
1732 {
1733         struct rte_eth_dev *dev;
1734
1735         if (!rte_eth_dev_is_valid_port(port_id)) {
1736                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1737                 return (-ENODEV);
1738         }
1739
1740         dev = &rte_eth_devices[port_id];
1741         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
1742         (*dev->dev_ops->vlan_tpid_set)(dev, tpid);
1743
1744         return (0);
1745 }
1746
1747 int
1748 rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
1749 {
1750         struct rte_eth_dev *dev;
1751         int ret = 0;
1752         int mask = 0;
1753         int cur, org = 0;
1754
1755         if (!rte_eth_dev_is_valid_port(port_id)) {
1756                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1757                 return (-ENODEV);
1758         }
1759
1760         dev = &rte_eth_devices[port_id];
1761
1762         /*check which option changed by application*/
1763         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
1764         org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
1765         if (cur != org){
1766                 dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
1767                 mask |= ETH_VLAN_STRIP_MASK;
1768         }
1769
1770         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
1771         org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
1772         if (cur != org){
1773                 dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
1774                 mask |= ETH_VLAN_FILTER_MASK;
1775         }
1776
1777         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
1778         org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
1779         if (cur != org){
1780                 dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
1781                 mask |= ETH_VLAN_EXTEND_MASK;
1782         }
1783
1784         /*no change*/
1785         if(mask == 0)
1786                 return ret;
1787
1788         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
1789         (*dev->dev_ops->vlan_offload_set)(dev, mask);
1790
1791         return ret;
1792 }
1793
1794 int
1795 rte_eth_dev_get_vlan_offload(uint8_t port_id)
1796 {
1797         struct rte_eth_dev *dev;
1798         int ret = 0;
1799
1800         if (!rte_eth_dev_is_valid_port(port_id)) {
1801                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1802                 return (-ENODEV);
1803         }
1804
1805         dev = &rte_eth_devices[port_id];
1806
1807         if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1808                 ret |= ETH_VLAN_STRIP_OFFLOAD ;
1809
1810         if (dev->data->dev_conf.rxmode.hw_vlan_filter)
1811                 ret |= ETH_VLAN_FILTER_OFFLOAD ;
1812
1813         if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1814                 ret |= ETH_VLAN_EXTEND_OFFLOAD ;
1815
1816         return ret;
1817 }
1818
1819 int
1820 rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on)
1821 {
1822         struct rte_eth_dev *dev;
1823
1824         if (!rte_eth_dev_is_valid_port(port_id)) {
1825                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1826                 return (-ENODEV);
1827         }
1828
1829         dev = &rte_eth_devices[port_id];
1830         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
1831         (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on);
1832
1833         return 0;
1834 }
1835
1836 int
1837 rte_eth_dev_fdir_add_signature_filter(uint8_t port_id,
1838                                       struct rte_fdir_filter *fdir_filter,
1839                                       uint8_t queue)
1840 {
1841         struct rte_eth_dev *dev;
1842
1843         if (!rte_eth_dev_is_valid_port(port_id)) {
1844                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1845                 return (-ENODEV);
1846         }
1847
1848         dev = &rte_eth_devices[port_id];
1849
1850         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
1851                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1852                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1853                 return (-ENOSYS);
1854         }
1855
1856         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1857              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1858             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1859                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1860                                 "None l4type, source & destinations ports " \
1861                                 "should be null!\n");
1862                 return (-EINVAL);
1863         }
1864
1865         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_signature_filter, -ENOTSUP);
1866         return (*dev->dev_ops->fdir_add_signature_filter)(dev, fdir_filter,
1867                                                                 queue);
1868 }
1869
1870 int
1871 rte_eth_dev_fdir_update_signature_filter(uint8_t port_id,
1872                                          struct rte_fdir_filter *fdir_filter,
1873                                          uint8_t queue)
1874 {
1875         struct rte_eth_dev *dev;
1876
1877         if (!rte_eth_dev_is_valid_port(port_id)) {
1878                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1879                 return (-ENODEV);
1880         }
1881
1882         dev = &rte_eth_devices[port_id];
1883
1884         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
1885                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1886                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1887                 return (-ENOSYS);
1888         }
1889
1890         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1891              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1892             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1893                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1894                                 "None l4type, source & destinations ports " \
1895                                 "should be null!\n");
1896                 return (-EINVAL);
1897         }
1898
1899         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_signature_filter, -ENOTSUP);
1900         return (*dev->dev_ops->fdir_update_signature_filter)(dev, fdir_filter,
1901                                                                 queue);
1902
1903 }
1904
1905 int
1906 rte_eth_dev_fdir_remove_signature_filter(uint8_t port_id,
1907                                          struct rte_fdir_filter *fdir_filter)
1908 {
1909         struct rte_eth_dev *dev;
1910
1911         if (!rte_eth_dev_is_valid_port(port_id)) {
1912                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1913                 return (-ENODEV);
1914         }
1915
1916         dev = &rte_eth_devices[port_id];
1917
1918         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
1919                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1920                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1921                 return (-ENOSYS);
1922         }
1923
1924         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1925              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1926             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1927                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1928                                 "None l4type source & destinations ports " \
1929                                 "should be null!\n");
1930                 return (-EINVAL);
1931         }
1932
1933         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_signature_filter, -ENOTSUP);
1934         return (*dev->dev_ops->fdir_remove_signature_filter)(dev, fdir_filter);
1935 }
1936
1937 int
1938 rte_eth_dev_fdir_get_infos(uint8_t port_id, struct rte_eth_fdir *fdir)
1939 {
1940         struct rte_eth_dev *dev;
1941
1942         if (!rte_eth_dev_is_valid_port(port_id)) {
1943                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1944                 return (-ENODEV);
1945         }
1946
1947         dev = &rte_eth_devices[port_id];
1948         if (! (dev->data->dev_conf.fdir_conf.mode)) {
1949                 PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
1950                 return (-ENOSYS);
1951         }
1952
1953         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_infos_get, -ENOTSUP);
1954
1955         (*dev->dev_ops->fdir_infos_get)(dev, fdir);
1956         return (0);
1957 }
1958
1959 int
1960 rte_eth_dev_fdir_add_perfect_filter(uint8_t port_id,
1961                                     struct rte_fdir_filter *fdir_filter,
1962                                     uint16_t soft_id, uint8_t queue,
1963                                     uint8_t drop)
1964 {
1965         struct rte_eth_dev *dev;
1966
1967         if (!rte_eth_dev_is_valid_port(port_id)) {
1968                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
1969                 return (-ENODEV);
1970         }
1971
1972         dev = &rte_eth_devices[port_id];
1973
1974         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
1975                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
1976                                 port_id, dev->data->dev_conf.fdir_conf.mode);
1977                 return (-ENOSYS);
1978         }
1979
1980         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
1981              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
1982             && (fdir_filter->port_src || fdir_filter->port_dst)) {
1983                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
1984                                 "None l4type, source & destinations ports " \
1985                                 "should be null!\n");
1986                 return (-EINVAL);
1987         }
1988
1989         /* For now IPv6 is not supported with perfect filter */
1990         if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
1991                 return (-ENOTSUP);
1992
1993         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_perfect_filter, -ENOTSUP);
1994         return (*dev->dev_ops->fdir_add_perfect_filter)(dev, fdir_filter,
1995                                                                 soft_id, queue,
1996                                                                 drop);
1997 }
1998
1999 int
2000 rte_eth_dev_fdir_update_perfect_filter(uint8_t port_id,
2001                                        struct rte_fdir_filter *fdir_filter,
2002                                        uint16_t soft_id, uint8_t queue,
2003                                        uint8_t drop)
2004 {
2005         struct rte_eth_dev *dev;
2006
2007         if (!rte_eth_dev_is_valid_port(port_id)) {
2008                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2009                 return (-ENODEV);
2010         }
2011
2012         dev = &rte_eth_devices[port_id];
2013
2014         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
2015                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
2016                                 port_id, dev->data->dev_conf.fdir_conf.mode);
2017                 return (-ENOSYS);
2018         }
2019
2020         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
2021              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
2022             && (fdir_filter->port_src || fdir_filter->port_dst)) {
2023                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
2024                                 "None l4type, source & destinations ports " \
2025                                 "should be null!\n");
2026                 return (-EINVAL);
2027         }
2028
2029         /* For now IPv6 is not supported with perfect filter */
2030         if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
2031                 return (-ENOTSUP);
2032
2033         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_perfect_filter, -ENOTSUP);
2034         return (*dev->dev_ops->fdir_update_perfect_filter)(dev, fdir_filter,
2035                                                         soft_id, queue, drop);
2036 }
2037
2038 int
2039 rte_eth_dev_fdir_remove_perfect_filter(uint8_t port_id,
2040                                        struct rte_fdir_filter *fdir_filter,
2041                                        uint16_t soft_id)
2042 {
2043         struct rte_eth_dev *dev;
2044
2045         if (!rte_eth_dev_is_valid_port(port_id)) {
2046                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2047                 return (-ENODEV);
2048         }
2049
2050         dev = &rte_eth_devices[port_id];
2051
2052         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
2053                 PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
2054                                 port_id, dev->data->dev_conf.fdir_conf.mode);
2055                 return (-ENOSYS);
2056         }
2057
2058         if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
2059              || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
2060             && (fdir_filter->port_src || fdir_filter->port_dst)) {
2061                 PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
2062                                 "None l4type, source & destinations ports " \
2063                                 "should be null!\n");
2064                 return (-EINVAL);
2065         }
2066
2067         /* For now IPv6 is not supported with perfect filter */
2068         if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
2069                 return (-ENOTSUP);
2070
2071         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_perfect_filter, -ENOTSUP);
2072         return (*dev->dev_ops->fdir_remove_perfect_filter)(dev, fdir_filter,
2073                                                                 soft_id);
2074 }
2075
2076 int
2077 rte_eth_dev_fdir_set_masks(uint8_t port_id, struct rte_fdir_masks *fdir_mask)
2078 {
2079         struct rte_eth_dev *dev;
2080
2081         if (!rte_eth_dev_is_valid_port(port_id)) {
2082                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2083                 return (-ENODEV);
2084         }
2085
2086         dev = &rte_eth_devices[port_id];
2087         if (! (dev->data->dev_conf.fdir_conf.mode)) {
2088                 PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
2089                 return (-ENOSYS);
2090         }
2091
2092         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_set_masks, -ENOTSUP);
2093         return (*dev->dev_ops->fdir_set_masks)(dev, fdir_mask);
2094 }
2095
2096 int
2097 rte_eth_dev_flow_ctrl_get(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
2098 {
2099         struct rte_eth_dev *dev;
2100
2101         if (!rte_eth_dev_is_valid_port(port_id)) {
2102                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2103                 return (-ENODEV);
2104         }
2105
2106         dev = &rte_eth_devices[port_id];
2107         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
2108         memset(fc_conf, 0, sizeof(*fc_conf));
2109         return (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf);
2110 }
2111
2112 int
2113 rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
2114 {
2115         struct rte_eth_dev *dev;
2116
2117         if (!rte_eth_dev_is_valid_port(port_id)) {
2118                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2119                 return (-ENODEV);
2120         }
2121
2122         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
2123                 PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
2124                 return (-EINVAL);
2125         }
2126
2127         dev = &rte_eth_devices[port_id];
2128         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
2129         return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
2130 }
2131
2132 int
2133 rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc_conf)
2134 {
2135         struct rte_eth_dev *dev;
2136
2137         if (!rte_eth_dev_is_valid_port(port_id)) {
2138                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2139                 return (-ENODEV);
2140         }
2141
2142         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
2143                 PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
2144                 return (-EINVAL);
2145         }
2146
2147         dev = &rte_eth_devices[port_id];
2148         /* High water, low water validation are device specific */
2149         if  (*dev->dev_ops->priority_flow_ctrl_set)
2150                 return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
2151         return (-ENOTSUP);
2152 }
2153
2154 static inline int
2155 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
2156                         uint16_t reta_size)
2157 {
2158         uint16_t i, num;
2159
2160         if (!reta_conf)
2161                 return -EINVAL;
2162
2163         if (reta_size != RTE_ALIGN(reta_size, RTE_RETA_GROUP_SIZE)) {
2164                 PMD_DEBUG_TRACE("Invalid reta size, should be %u aligned\n",
2165                                                         RTE_RETA_GROUP_SIZE);
2166                 return -EINVAL;
2167         }
2168
2169         num = reta_size / RTE_RETA_GROUP_SIZE;
2170         for (i = 0; i < num; i++) {
2171                 if (reta_conf[i].mask)
2172                         return 0;
2173         }
2174
2175         return -EINVAL;
2176 }
2177
2178 static inline int
2179 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
2180                          uint16_t reta_size,
2181                          uint8_t max_rxq)
2182 {
2183         uint16_t i, idx, shift;
2184
2185         if (!reta_conf)
2186                 return -EINVAL;
2187
2188         if (max_rxq == 0) {
2189                 PMD_DEBUG_TRACE("No receive queue is available\n");
2190                 return -EINVAL;
2191         }
2192
2193         for (i = 0; i < reta_size; i++) {
2194                 idx = i / RTE_RETA_GROUP_SIZE;
2195                 shift = i % RTE_RETA_GROUP_SIZE;
2196                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
2197                         (reta_conf[idx].reta[shift] >= max_rxq)) {
2198                         PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
2199                                 "the maximum rxq index: %u\n", idx, shift,
2200                                 reta_conf[idx].reta[shift], max_rxq);
2201                         return -EINVAL;
2202                 }
2203         }
2204
2205         return 0;
2206 }
2207
2208 int
2209 rte_eth_dev_rss_reta_update(uint8_t port_id,
2210                             struct rte_eth_rss_reta_entry64 *reta_conf,
2211                             uint16_t reta_size)
2212 {
2213         struct rte_eth_dev *dev;
2214         int ret;
2215
2216         if (!rte_eth_dev_is_valid_port(port_id)) {
2217                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2218                 return -ENODEV;
2219         }
2220
2221         /* Check mask bits */
2222         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2223         if (ret < 0)
2224                 return ret;
2225
2226         dev = &rte_eth_devices[port_id];
2227
2228         /* Check entry value */
2229         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
2230                                 dev->data->nb_rx_queues);
2231         if (ret < 0)
2232                 return ret;
2233
2234         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
2235         return (*dev->dev_ops->reta_update)(dev, reta_conf, reta_size);
2236 }
2237
2238 int
2239 rte_eth_dev_rss_reta_query(uint8_t port_id,
2240                            struct rte_eth_rss_reta_entry64 *reta_conf,
2241                            uint16_t reta_size)
2242 {
2243         struct rte_eth_dev *dev;
2244         int ret;
2245
2246         if (port_id >= nb_ports) {
2247                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2248                 return -ENODEV;
2249         }
2250
2251         /* Check mask bits */
2252         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2253         if (ret < 0)
2254                 return ret;
2255
2256         dev = &rte_eth_devices[port_id];
2257         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
2258         return (*dev->dev_ops->reta_query)(dev, reta_conf, reta_size);
2259 }
2260
2261 int
2262 rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf)
2263 {
2264         struct rte_eth_dev *dev;
2265         uint16_t rss_hash_protos;
2266
2267         if (!rte_eth_dev_is_valid_port(port_id)) {
2268                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2269                 return (-ENODEV);
2270         }
2271
2272         rss_hash_protos = rss_conf->rss_hf;
2273         if ((rss_hash_protos != 0) &&
2274             ((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) {
2275                 PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%x\n",
2276                                 rss_hash_protos);
2277                 return (-EINVAL);
2278         }
2279         dev = &rte_eth_devices[port_id];
2280         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
2281         return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
2282 }
2283
2284 int
2285 rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
2286                               struct rte_eth_rss_conf *rss_conf)
2287 {
2288         struct rte_eth_dev *dev;
2289
2290         if (!rte_eth_dev_is_valid_port(port_id)) {
2291                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2292                 return (-ENODEV);
2293         }
2294
2295         dev = &rte_eth_devices[port_id];
2296         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
2297         return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
2298 }
2299
2300 int
2301 rte_eth_dev_udp_tunnel_add(uint8_t port_id,
2302                            struct rte_eth_udp_tunnel *udp_tunnel)
2303 {
2304         struct rte_eth_dev *dev;
2305
2306         if (!rte_eth_dev_is_valid_port(port_id)) {
2307                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2308                 return -ENODEV;
2309         }
2310
2311         if (udp_tunnel == NULL) {
2312                 PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2313                 return -EINVAL;
2314         }
2315
2316         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2317                 PMD_DEBUG_TRACE("Invalid tunnel type\n");
2318                 return -EINVAL;
2319         }
2320
2321         dev = &rte_eth_devices[port_id];
2322         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_add, -ENOTSUP);
2323         return (*dev->dev_ops->udp_tunnel_add)(dev, udp_tunnel);
2324 }
2325
2326 int
2327 rte_eth_dev_udp_tunnel_delete(uint8_t port_id,
2328                               struct rte_eth_udp_tunnel *udp_tunnel)
2329 {
2330         struct rte_eth_dev *dev;
2331
2332         if (!rte_eth_dev_is_valid_port(port_id)) {
2333                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2334                 return -ENODEV;
2335         }
2336
2337         dev = &rte_eth_devices[port_id];
2338
2339         if (udp_tunnel == NULL) {
2340                 PMD_DEBUG_TRACE("Invalid udp_tunnel parametr\n");
2341                 return -EINVAL;
2342         }
2343
2344         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2345                 PMD_DEBUG_TRACE("Invalid tunnel type\n");
2346                 return -EINVAL;
2347         }
2348
2349         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_del, -ENOTSUP);
2350         return (*dev->dev_ops->udp_tunnel_del)(dev, udp_tunnel);
2351 }
2352
2353 int
2354 rte_eth_led_on(uint8_t port_id)
2355 {
2356         struct rte_eth_dev *dev;
2357
2358         if (!rte_eth_dev_is_valid_port(port_id)) {
2359                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2360                 return (-ENODEV);
2361         }
2362
2363         dev = &rte_eth_devices[port_id];
2364         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
2365         return ((*dev->dev_ops->dev_led_on)(dev));
2366 }
2367
2368 int
2369 rte_eth_led_off(uint8_t port_id)
2370 {
2371         struct rte_eth_dev *dev;
2372
2373         if (!rte_eth_dev_is_valid_port(port_id)) {
2374                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2375                 return (-ENODEV);
2376         }
2377
2378         dev = &rte_eth_devices[port_id];
2379         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
2380         return ((*dev->dev_ops->dev_led_off)(dev));
2381 }
2382
2383 /*
2384  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2385  * an empty spot.
2386  */
2387 static inline int
2388 get_mac_addr_index(uint8_t port_id, struct ether_addr *addr)
2389 {
2390         struct rte_eth_dev_info dev_info;
2391         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2392         unsigned i;
2393
2394         rte_eth_dev_info_get(port_id, &dev_info);
2395
2396         for (i = 0; i < dev_info.max_mac_addrs; i++)
2397                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
2398                         return i;
2399
2400         return -1;
2401 }
2402
/* All-zeroes address: used to mark/find a free slot in the MAC array. */
static struct ether_addr null_mac_addr = {{0, 0, 0, 0, 0, 0}};
2404
2405 int
2406 rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
2407                         uint32_t pool)
2408 {
2409         struct rte_eth_dev *dev;
2410         int index;
2411         uint64_t pool_mask;
2412
2413         if (!rte_eth_dev_is_valid_port(port_id)) {
2414                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2415                 return (-ENODEV);
2416         }
2417
2418         dev = &rte_eth_devices[port_id];
2419         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
2420
2421         if (is_zero_ether_addr(addr)) {
2422                 PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2423                         port_id);
2424                 return (-EINVAL);
2425         }
2426         if (pool >= ETH_64_POOLS) {
2427                 PMD_DEBUG_TRACE("pool id must be 0-%d\n",ETH_64_POOLS - 1);
2428                 return (-EINVAL);
2429         }
2430
2431         index = get_mac_addr_index(port_id, addr);
2432         if (index < 0) {
2433                 index = get_mac_addr_index(port_id, &null_mac_addr);
2434                 if (index < 0) {
2435                         PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2436                                 port_id);
2437                         return (-ENOSPC);
2438                 }
2439         } else {
2440                 pool_mask = dev->data->mac_pool_sel[index];
2441
2442                 /* Check if both MAC address and pool is alread there, and do nothing */
2443                 if (pool_mask & (1ULL << pool))
2444                         return 0;
2445         }
2446
2447         /* Update NIC */
2448         (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
2449
2450         /* Update address in NIC data structure */
2451         ether_addr_copy(addr, &dev->data->mac_addrs[index]);
2452
2453         /* Update pool bitmap in NIC data structure */
2454         dev->data->mac_pool_sel[index] |= (1ULL << pool);
2455
2456         return 0;
2457 }
2458
/*
 * Remove MAC address @addr from port @port_id: clear it in the NIC
 * and in the software shadow array, and reset its pool bitmap.
 *
 * Returns 0 on success (including when the address was not present),
 * -ENODEV for an unknown port, -ENOTSUP if the driver lacks the op,
 * -EADDRINUSE when @addr is the port's default address (slot 0).
 */
int
rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
{
	struct rte_eth_dev *dev;
	int index;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-ENODEV);
	}

	dev = &rte_eth_devices[port_id];
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);

	index = get_mac_addr_index(port_id, addr);
	if (index == 0) {
		/* Slot 0 always holds the port's default MAC address;
		 * it cannot be removed through this API. */
		PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
		return (-EADDRINUSE);
	} else if (index < 0)
		return 0;  /* Do nothing if address wasn't found */

	/* Update NIC */
	(*dev->dev_ops->mac_addr_remove)(dev, index);

	/* Update address in NIC data structure */
	ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);

	/* reset pool bitmap */
	dev->data->mac_pool_sel[index] = 0;

	return 0;
}
2491
2492 int
2493 rte_eth_dev_set_vf_rxmode(uint8_t port_id,  uint16_t vf,
2494                                 uint16_t rx_mode, uint8_t on)
2495 {
2496         uint16_t num_vfs;
2497         struct rte_eth_dev *dev;
2498         struct rte_eth_dev_info dev_info;
2499
2500         if (!rte_eth_dev_is_valid_port(port_id)) {
2501                 PMD_DEBUG_TRACE("set VF RX mode:Invalid port_id=%d\n",
2502                                 port_id);
2503                 return (-ENODEV);
2504         }
2505
2506         dev = &rte_eth_devices[port_id];
2507         rte_eth_dev_info_get(port_id, &dev_info);
2508
2509         num_vfs = dev_info.max_vfs;
2510         if (vf > num_vfs)
2511         {
2512                 PMD_DEBUG_TRACE("set VF RX mode:invalid VF id %d\n", vf);
2513                 return (-EINVAL);
2514         }
2515         if (rx_mode == 0)
2516         {
2517                 PMD_DEBUG_TRACE("set VF RX mode:mode mask ca not be zero\n");
2518                 return (-EINVAL);
2519         }
2520         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP);
2521         return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on);
2522 }
2523
2524 /*
2525  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2526  * an empty spot.
2527  */
2528 static inline int
2529 get_hash_mac_addr_index(uint8_t port_id, struct ether_addr *addr)
2530 {
2531         struct rte_eth_dev_info dev_info;
2532         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2533         unsigned i;
2534
2535         rte_eth_dev_info_get(port_id, &dev_info);
2536         if (!dev->data->hash_mac_addrs)
2537                 return -1;
2538
2539         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
2540                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
2541                         ETHER_ADDR_LEN) == 0)
2542                         return i;
2543
2544         return -1;
2545 }
2546
2547 int
2548 rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
2549                                 uint8_t on)
2550 {
2551         int index;
2552         int ret;
2553         struct rte_eth_dev *dev;
2554
2555         if (!rte_eth_dev_is_valid_port(port_id)) {
2556                 PMD_DEBUG_TRACE("unicast hash setting:Invalid port_id=%d\n",
2557                         port_id);
2558                 return (-ENODEV);
2559         }
2560
2561         dev = &rte_eth_devices[port_id];
2562         if (is_zero_ether_addr(addr)) {
2563                 PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2564                         port_id);
2565                 return (-EINVAL);
2566         }
2567
2568         index = get_hash_mac_addr_index(port_id, addr);
2569         /* Check if it's already there, and do nothing */
2570         if ((index >= 0) && (on))
2571                 return 0;
2572
2573         if (index < 0) {
2574                 if (!on) {
2575                         PMD_DEBUG_TRACE("port %d: the MAC address was not"
2576                                 "set in UTA\n", port_id);
2577                         return (-EINVAL);
2578                 }
2579
2580                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
2581                 if (index < 0) {
2582                         PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2583                                         port_id);
2584                         return (-ENOSPC);
2585                 }
2586         }
2587
2588         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
2589         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
2590         if (ret == 0) {
2591                 /* Update address in NIC data structure */
2592                 if (on)
2593                         ether_addr_copy(addr,
2594                                         &dev->data->hash_mac_addrs[index]);
2595                 else
2596                         ether_addr_copy(&null_mac_addr,
2597                                         &dev->data->hash_mac_addrs[index]);
2598         }
2599
2600         return ret;
2601 }
2602
2603 int
2604 rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
2605 {
2606         struct rte_eth_dev *dev;
2607
2608         if (!rte_eth_dev_is_valid_port(port_id)) {
2609                 PMD_DEBUG_TRACE("unicast hash setting:Invalid port_id=%d\n",
2610                         port_id);
2611                 return (-ENODEV);
2612         }
2613
2614         dev = &rte_eth_devices[port_id];
2615
2616         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
2617         return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
2618 }
2619
2620 int
2621 rte_eth_dev_set_vf_rx(uint8_t port_id,uint16_t vf, uint8_t on)
2622 {
2623         uint16_t num_vfs;
2624         struct rte_eth_dev *dev;
2625         struct rte_eth_dev_info dev_info;
2626
2627         if (!rte_eth_dev_is_valid_port(port_id)) {
2628                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2629                 return (-ENODEV);
2630         }
2631
2632         dev = &rte_eth_devices[port_id];
2633         rte_eth_dev_info_get(port_id, &dev_info);
2634
2635         num_vfs = dev_info.max_vfs;
2636         if (vf > num_vfs)
2637         {
2638                 PMD_DEBUG_TRACE("port %d: invalid vf id\n", port_id);
2639                 return (-EINVAL);
2640         }
2641
2642         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP);
2643         return (*dev->dev_ops->set_vf_rx)(dev, vf,on);
2644 }
2645
2646 int
2647 rte_eth_dev_set_vf_tx(uint8_t port_id,uint16_t vf, uint8_t on)
2648 {
2649         uint16_t num_vfs;
2650         struct rte_eth_dev *dev;
2651         struct rte_eth_dev_info dev_info;
2652
2653         if (!rte_eth_dev_is_valid_port(port_id)) {
2654                 PMD_DEBUG_TRACE("set pool tx:Invalid port_id=%d\n", port_id);
2655                 return (-ENODEV);
2656         }
2657
2658         dev = &rte_eth_devices[port_id];
2659         rte_eth_dev_info_get(port_id, &dev_info);
2660
2661         num_vfs = dev_info.max_vfs;
2662         if (vf > num_vfs)
2663         {
2664                 PMD_DEBUG_TRACE("set pool tx:invalid pool id=%d\n", vf);
2665                 return (-EINVAL);
2666         }
2667
2668         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP);
2669         return (*dev->dev_ops->set_vf_tx)(dev, vf,on);
2670 }
2671
2672 int
2673 rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id,
2674                                  uint64_t vf_mask,uint8_t vlan_on)
2675 {
2676         struct rte_eth_dev *dev;
2677
2678         if (!rte_eth_dev_is_valid_port(port_id)) {
2679                 PMD_DEBUG_TRACE("VF VLAN filter:invalid port id=%d\n",
2680                                 port_id);
2681                 return (-ENODEV);
2682         }
2683         dev = &rte_eth_devices[port_id];
2684
2685         if(vlan_id > ETHER_MAX_VLAN_ID)
2686         {
2687                 PMD_DEBUG_TRACE("VF VLAN filter:invalid VLAN id=%d\n",
2688                         vlan_id);
2689                 return (-EINVAL);
2690         }
2691         if (vf_mask == 0)
2692         {
2693                 PMD_DEBUG_TRACE("VF VLAN filter:pool_mask can not be 0\n");
2694                 return (-EINVAL);
2695         }
2696
2697         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP);
2698         return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id,
2699                                                 vf_mask,vlan_on);
2700 }
2701
/*
 * Limit the TX rate of queue @queue_idx on port @port_id to @tx_rate
 * (same unit as link_speed, presumably Mbps -- confirm against the
 * PMD). The rate may not exceed the current link speed.
 *
 * Returns 0 on success, -ENODEV for an unknown port, -EINVAL for a
 * bad queue id or excessive rate, -ENOTSUP if the driver lacks the
 * op; otherwise the driver's return value.
 */
int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
					uint16_t tx_rate)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_link link;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		PMD_DEBUG_TRACE("set queue rate limit:invalid port id=%d\n",
				port_id);
		return -ENODEV;
	}

	dev = &rte_eth_devices[port_id];
	rte_eth_dev_info_get(port_id, &dev_info);
	/* Snapshot of the last known link state. */
	link = dev->data->dev_link;

	/* NOTE(review): valid queue ids are 0..max_tx_queues-1, so this
	 * ">" comparison looks off by one -- confirm intent before
	 * changing. */
	if (queue_idx > dev_info.max_tx_queues) {
		PMD_DEBUG_TRACE("set queue rate limit:port %d: "
				"invalid queue id=%d\n", port_id, queue_idx);
		return -EINVAL;
	}

	if (tx_rate > link.link_speed) {
		PMD_DEBUG_TRACE("set queue rate limit:invalid tx_rate=%d, "
				"bigger than link speed= %d\n",
			tx_rate, link.link_speed);
		return -EINVAL;
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
	return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate);
}
2735
/*
 * Limit the TX rate of VF @vf on port @port_id to @tx_rate on every
 * queue selected in @q_msk. An empty queue mask is a successful
 * no-op. The rate may not exceed the current link speed.
 *
 * Returns 0 on success, -ENODEV for an unknown port, -EINVAL for a
 * bad VF id or excessive rate, -ENOTSUP if the driver lacks the op;
 * otherwise the driver's return value.
 */
int rte_eth_set_vf_rate_limit(uint8_t port_id, uint16_t vf, uint16_t tx_rate,
				uint64_t q_msk)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_link link;

	/* Nothing selected: nothing to do. */
	if (q_msk == 0)
		return 0;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		PMD_DEBUG_TRACE("set VF rate limit:invalid port id=%d\n",
				port_id);
		return -ENODEV;
	}

	dev = &rte_eth_devices[port_id];
	rte_eth_dev_info_get(port_id, &dev_info);
	/* Snapshot of the last known link state. */
	link = dev->data->dev_link;

	/* NOTE(review): valid VF ids are 0..max_vfs-1, so this ">"
	 * comparison looks off by one -- confirm intent. */
	if (vf > dev_info.max_vfs) {
		PMD_DEBUG_TRACE("set VF rate limit:port %d: "
				"invalid vf id=%d\n", port_id, vf);
		return -EINVAL;
	}

	if (tx_rate > link.link_speed) {
		PMD_DEBUG_TRACE("set VF rate limit:invalid tx_rate=%d, "
				"bigger than link speed= %d\n",
				tx_rate, link.link_speed);
		return -EINVAL;
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rate_limit, -ENOTSUP);
	return (*dev->dev_ops->set_vf_rate_limit)(dev, vf, tx_rate, q_msk);
}
2772
2773 int
2774 rte_eth_mirror_rule_set(uint8_t port_id,
2775                         struct rte_eth_vmdq_mirror_conf *mirror_conf,
2776                         uint8_t rule_id, uint8_t on)
2777 {
2778         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2779
2780         if (!rte_eth_dev_is_valid_port(port_id)) {
2781                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2782                 return (-ENODEV);
2783         }
2784
2785         if (mirror_conf->rule_type_mask == 0) {
2786                 PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
2787                 return (-EINVAL);
2788         }
2789
2790         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
2791                 PMD_DEBUG_TRACE("Invalid dst pool, pool id must"
2792                         "be 0-%d\n",ETH_64_POOLS - 1);
2793                 return (-EINVAL);
2794         }
2795
2796         if ((mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) &&
2797                 (mirror_conf->pool_mask == 0)) {
2798                 PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not"
2799                                 "be 0.\n");
2800                 return (-EINVAL);
2801         }
2802
2803         if(rule_id >= ETH_VMDQ_NUM_MIRROR_RULE)
2804         {
2805                 PMD_DEBUG_TRACE("Invalid rule_id, rule_id must be 0-%d\n",
2806                         ETH_VMDQ_NUM_MIRROR_RULE - 1);
2807                 return (-EINVAL);
2808         }
2809
2810         dev = &rte_eth_devices[port_id];
2811         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
2812
2813         return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
2814 }
2815
2816 int
2817 rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
2818 {
2819         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2820
2821         if (!rte_eth_dev_is_valid_port(port_id)) {
2822                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2823                 return (-ENODEV);
2824         }
2825
2826         if(rule_id >= ETH_VMDQ_NUM_MIRROR_RULE)
2827         {
2828                 PMD_DEBUG_TRACE("Invalid rule_id, rule_id must be 0-%d\n",
2829                         ETH_VMDQ_NUM_MIRROR_RULE-1);
2830                 return (-EINVAL);
2831         }
2832
2833         dev = &rte_eth_devices[port_id];
2834         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
2835
2836         return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
2837 }
2838
2839 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
/*
 * Debug-build variant of rte_eth_rx_burst(): validates the port id,
 * the driver's RX burst handler and the queue id before dispatching
 * to the PMD. Returns the number of packets stored in @rx_pkts, or
 * 0 on any validation failure.
 */
uint16_t
rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
		 struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct rte_eth_dev *dev;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return 0;
	}

	dev = &rte_eth_devices[port_id];
	FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);
	if (queue_id >= dev->data->nb_rx_queues) {
		PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
		return 0;
	}
	return (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
						rx_pkts, nb_pkts);
}
2860
/*
 * Debug-build variant of rte_eth_tx_burst(): validates the port id,
 * the driver's TX burst handler and the queue id before dispatching
 * to the PMD. Returns the number of packets actually sent, or 0 on
 * any validation failure.
 */
uint16_t
rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
		 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct rte_eth_dev *dev;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return 0;
	}

	dev = &rte_eth_devices[port_id];

	FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);
	if (queue_id >= dev->data->nb_tx_queues) {
		PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
		return 0;
	}
	return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id],
						tx_pkts, nb_pkts);
}
2882
2883 uint32_t
2884 rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
2885 {
2886         struct rte_eth_dev *dev;
2887
2888         if (!rte_eth_dev_is_valid_port(port_id)) {
2889                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2890                 return 0;
2891         }
2892
2893         dev = &rte_eth_devices[port_id];
2894         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, 0);
2895         return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
2896 }
2897
2898 int
2899 rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
2900 {
2901         struct rte_eth_dev *dev;
2902
2903         if (!rte_eth_dev_is_valid_port(port_id)) {
2904                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
2905                 return (-ENODEV);
2906         }
2907
2908         dev = &rte_eth_devices[port_id];
2909         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
2910         return (*dev->dev_ops->rx_descriptor_done)( \
2911                 dev->data->rx_queues[queue_id], offset);
2912 }
2913 #endif
2914
/*
 * Register @cb_fn (with @cb_arg) to be invoked when @event occurs on
 * port @port_id. Registering the same (fn, arg, event) triple twice
 * is a silent no-op.
 *
 * Returns 0 on success, -EINVAL for a NULL callback or bad port,
 * -ENOMEM if the callback record cannot be allocated.
 */
int
rte_eth_dev_callback_register(uint8_t port_id,
			enum rte_eth_event_type event,
			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_callback *user_cb;

	if (!cb_fn)
		return (-EINVAL);

	if (!rte_eth_dev_is_valid_port(port_id)) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-EINVAL);
	}

	dev = &rte_eth_devices[port_id];
	rte_spinlock_lock(&rte_eth_dev_cb_lock);

	/* Scan for an existing registration of the same triple; if the
	 * loop completes, user_cb is NULL. */
	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
		if (user_cb->cb_fn == cb_fn &&
			user_cb->cb_arg == cb_arg &&
			user_cb->event == event) {
			break;
		}
	}

	/* create a new callback. */
	if (user_cb == NULL && (user_cb = rte_zmalloc("INTR_USER_CALLBACK",
			sizeof(struct rte_eth_dev_callback), 0)) != NULL) {
		user_cb->cb_fn = cb_fn;
		user_cb->cb_arg = cb_arg;
		user_cb->event = event;
		TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
	}

	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
	/* user_cb is NULL here only when rte_zmalloc() failed. */
	return ((user_cb == NULL) ? -ENOMEM : 0);
}
2954
/*
 * Unregister all callbacks matching @cb_fn and @event on port
 * @port_id. A @cb_arg of (void *)-1 is a wildcard matching any
 * argument; otherwise @cb_arg must match exactly.
 *
 * Returns 0 on success, -EINVAL for a NULL callback or bad port,
 * -EAGAIN if at least one matching callback is currently executing
 * (see _rte_eth_dev_callback_process) and could not be removed.
 */
int
rte_eth_dev_callback_unregister(uint8_t port_id,
			enum rte_eth_event_type event,
			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_callback *cb, *next;

	if (!cb_fn)
		return (-EINVAL);

	if (!rte_eth_dev_is_valid_port(port_id)) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-EINVAL);
	}

	dev = &rte_eth_devices[port_id];
	rte_spinlock_lock(&rte_eth_dev_cb_lock);

	ret = 0;
	/* 'next' is captured before a possible removal so iteration
	 * remains safe while deleting. */
	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb->cb_arg != (void *)-1 &&
				cb->cb_arg != cb_arg))
			continue;

		/*
		 * if this callback is not executing right now,
		 * then remove it.
		 */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
			rte_free(cb);
		} else {
			ret = -EAGAIN;
		}
	}

	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
	return (ret);
}
3000
/*
 * Invoke every callback registered for @event on @dev.
 *
 * The spinlock is released around each invocation so callbacks may
 * themselves call ethdev APIs; a stack copy of the entry is taken
 * first so the unlocked call never reads the live list node, and the
 * 'active' flag tells rte_eth_dev_callback_unregister() that the
 * entry must not be freed while the callback runs.
 */
void
_rte_eth_dev_callback_process(struct rte_eth_dev *dev,
	enum rte_eth_event_type event)
{
	struct rte_eth_dev_callback *cb_lst;
	struct rte_eth_dev_callback dev_cb;

	rte_spinlock_lock(&rte_eth_dev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		rte_spinlock_unlock(&rte_eth_dev_cb_lock);
		dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
						dev_cb.cb_arg);
		rte_spinlock_lock(&rte_eth_dev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
}
3022 #ifdef RTE_NIC_BYPASS
3023 int rte_eth_dev_bypass_init(uint8_t port_id)
3024 {
3025         struct rte_eth_dev *dev;
3026
3027         if (!rte_eth_dev_is_valid_port(port_id)) {
3028                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3029                 return (-ENODEV);
3030         }
3031
3032         if ((dev= &rte_eth_devices[port_id]) == NULL) {
3033                 PMD_DEBUG_TRACE("Invalid port device\n");
3034                 return (-ENODEV);
3035         }
3036
3037         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP);
3038         (*dev->dev_ops->bypass_init)(dev);
3039         return 0;
3040 }
3041
3042 int
3043 rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state)
3044 {
3045         struct rte_eth_dev *dev;
3046
3047         if (!rte_eth_dev_is_valid_port(port_id)) {
3048                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3049                 return (-ENODEV);
3050         }
3051
3052         if ((dev= &rte_eth_devices[port_id]) == NULL) {
3053                 PMD_DEBUG_TRACE("Invalid port device\n");
3054                 return (-ENODEV);
3055         }
3056         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
3057         (*dev->dev_ops->bypass_state_show)(dev, state);
3058         return 0;
3059 }
3060
3061 int
3062 rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state)
3063 {
3064         struct rte_eth_dev *dev;
3065
3066         if (!rte_eth_dev_is_valid_port(port_id)) {
3067                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3068                 return (-ENODEV);
3069         }
3070
3071         if ((dev= &rte_eth_devices[port_id]) == NULL) {
3072                 PMD_DEBUG_TRACE("Invalid port device\n");
3073                 return (-ENODEV);
3074         }
3075
3076         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP);
3077         (*dev->dev_ops->bypass_state_set)(dev, new_state);
3078         return 0;
3079 }
3080
3081 int
3082 rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state)
3083 {
3084         struct rte_eth_dev *dev;
3085
3086         if (!rte_eth_dev_is_valid_port(port_id)) {
3087                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3088                 return (-ENODEV);
3089         }
3090
3091         if ((dev= &rte_eth_devices[port_id]) == NULL) {
3092                 PMD_DEBUG_TRACE("Invalid port device\n");
3093                 return (-ENODEV);
3094         }
3095
3096         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
3097         (*dev->dev_ops->bypass_event_show)(dev, event, state);
3098         return 0;
3099 }
3100
3101 int
3102 rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state)
3103 {
3104         struct rte_eth_dev *dev;
3105
3106         if (!rte_eth_dev_is_valid_port(port_id)) {
3107                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3108                 return (-ENODEV);
3109         }
3110
3111         if ((dev= &rte_eth_devices[port_id]) == NULL) {
3112                 PMD_DEBUG_TRACE("Invalid port device\n");
3113                 return (-ENODEV);
3114         }
3115
3116         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP);
3117         (*dev->dev_ops->bypass_event_set)(dev, event, state);
3118         return 0;
3119 }
3120
3121 int
3122 rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout)
3123 {
3124         struct rte_eth_dev *dev;
3125
3126         if (!rte_eth_dev_is_valid_port(port_id)) {
3127                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3128                 return (-ENODEV);
3129         }
3130
3131         if ((dev= &rte_eth_devices[port_id]) == NULL) {
3132                 PMD_DEBUG_TRACE("Invalid port device\n");
3133                 return (-ENODEV);
3134         }
3135
3136         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP);
3137         (*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout);
3138         return 0;
3139 }
3140
3141 int
3142 rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver)
3143 {
3144         struct rte_eth_dev *dev;
3145
3146         if (!rte_eth_dev_is_valid_port(port_id)) {
3147                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3148                 return (-ENODEV);
3149         }
3150
3151         if ((dev= &rte_eth_devices[port_id]) == NULL) {
3152                 PMD_DEBUG_TRACE("Invalid port device\n");
3153                 return (-ENODEV);
3154         }
3155
3156         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP);
3157         (*dev->dev_ops->bypass_ver_show)(dev, ver);
3158         return 0;
3159 }
3160
3161 int
3162 rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
3163 {
3164         struct rte_eth_dev *dev;
3165
3166         if (!rte_eth_dev_is_valid_port(port_id)) {
3167                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3168                 return (-ENODEV);
3169         }
3170
3171         if ((dev= &rte_eth_devices[port_id]) == NULL) {
3172                 PMD_DEBUG_TRACE("Invalid port device\n");
3173                 return (-ENODEV);
3174         }
3175
3176         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP);
3177         (*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout);
3178         return 0;
3179 }
3180
3181 int
3182 rte_eth_dev_bypass_wd_reset(uint8_t port_id)
3183 {
3184         struct rte_eth_dev *dev;
3185
3186         if (!rte_eth_dev_is_valid_port(port_id)) {
3187                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3188                 return (-ENODEV);
3189         }
3190
3191         if ((dev= &rte_eth_devices[port_id]) == NULL) {
3192                 PMD_DEBUG_TRACE("Invalid port device\n");
3193                 return (-ENODEV);
3194         }
3195
3196         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP);
3197         (*dev->dev_ops->bypass_wd_reset)(dev);
3198         return 0;
3199 }
3200 #endif
3201
3202 int
3203 rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type)
3204 {
3205         struct rte_eth_dev *dev;
3206
3207         if (!rte_eth_dev_is_valid_port(port_id)) {
3208                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3209                 return -ENODEV;
3210         }
3211
3212         dev = &rte_eth_devices[port_id];
3213         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3214         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3215                                 RTE_ETH_FILTER_NOP, NULL);
3216 }
3217
3218 int
3219 rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
3220                        enum rte_filter_op filter_op, void *arg)
3221 {
3222         struct rte_eth_dev *dev;
3223
3224         if (!rte_eth_dev_is_valid_port(port_id)) {
3225                 PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
3226                 return -ENODEV;
3227         }
3228
3229         dev = &rte_eth_devices[port_id];
3230         FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3231         return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op, arg);
3232 }
3233
/*
 * Prepend @fn (with @user_param) to the post-RX callback chain of
 * queue @queue_id on port @port_id. Returns the new callback handle,
 * or NULL with rte_errno set: ENOTSUP when callbacks are compiled
 * out, EINVAL on bad arguments, ENOMEM on allocation failure.
 *
 * NOTE(review): when RTE_ETHDEV_RXTX_CALLBACKS is undefined the
 * #ifndef block returns immediately and the remainder of the
 * function is unreachable -- likely a dead-code warning; confirm
 * this build-time shape is intended.
 *
 * NOTE(review): the list-head update is not protected by any lock --
 * presumably callers must not add callbacks concurrently with RX on
 * the same queue; verify.
 */
void *
rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
		rte_rxtx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	rte_errno = ENOTSUP;
	return NULL;
#endif
	/* check input parameters */
	if (port_id >= nb_ports || fn == NULL ||
		    queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
		rte_errno = EINVAL;
		return NULL;
	}

	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

	if (cb == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	/* Link the new callback in at the head of the chain. */
	cb->fn = fn;
	cb->param = user_param;
	cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
	rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
	return cb;
}
3262
/*
 * Prepend @fn (with @user_param) to the pre-TX callback chain of
 * queue @queue_id on port @port_id. Returns the new callback handle,
 * or NULL with rte_errno set: ENOTSUP when callbacks are compiled
 * out, EINVAL on bad arguments, ENOMEM on allocation failure.
 *
 * NOTE(review): same caveats as rte_eth_add_rx_callback -- the
 * #ifndef early return leaves the rest unreachable when callbacks
 * are compiled out, and the list-head update is unlocked; verify
 * both are intended.
 */
void *
rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
		rte_rxtx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	rte_errno = ENOTSUP;
	return NULL;
#endif
	/* check input parameters */
	if (port_id >= nb_ports || fn == NULL ||
		    queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
		rte_errno = EINVAL;
		return NULL;
	}

	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

	if (cb == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	/* Link the new callback in at the head of the chain. */
	cb->fn = fn;
	cb->param = user_param;
	cb->next = rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
	rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
	return cb;
}
3291
/*
 * Unlink @user_cb from the post-RX callback chain of queue @queue_id
 * on port @port_id. The callback memory is NOT freed here; it
 * remains owned by the caller.
 *
 * Returns 0 on success, -EINVAL on bad arguments or if the callback
 * is not in the chain, -ENOTSUP when callbacks are compiled out.
 */
int
rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
		struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	return (-ENOTSUP);
#endif
	/* Check input parameters. */
	if (port_id >= nb_ports || user_cb == NULL ||
		    queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
		return (-EINVAL);
	}

	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_eth_rxtx_callback *cb = dev->post_rx_burst_cbs[queue_id];
	struct rte_eth_rxtx_callback *prev_cb;

	/* Reset head pointer and remove user cb if first in the list. */
	if (cb == user_cb) {
		dev->post_rx_burst_cbs[queue_id] = user_cb->next;
		return 0;
	}

	/* Remove the user cb from the callback list. */
	do {
		prev_cb = cb;
		cb = cb->next;

		if (cb == user_cb) {
			prev_cb->next = user_cb->next;
			return 0;
		}

	} while (cb != NULL);

	/* Callback wasn't found. */
	return (-EINVAL);
}
3330
/*
 * Unlink @user_cb from the pre-TX callback chain of queue @queue_id
 * on port @port_id. The callback memory is NOT freed here; it
 * remains owned by the caller.
 *
 * Returns 0 on success, -EINVAL on bad arguments or if the callback
 * is not in the chain, -ENOTSUP when callbacks are compiled out.
 */
int
rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
		struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	return (-ENOTSUP);
#endif
	/* Check input parameters. */
	if (port_id >= nb_ports || user_cb == NULL ||
		    queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
		return (-EINVAL);
	}

	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];
	struct rte_eth_rxtx_callback *prev_cb;

	/* Reset head pointer and remove user cb if first in the list. */
	if (cb == user_cb) {
		dev->pre_tx_burst_cbs[queue_id] = user_cb->next;
		return 0;
	}

	/* Remove the user cb from the callback list. */
	do {
		prev_cb = cb;
		cb = cb->next;

		if (cb == user_cb) {
			prev_cb->next = user_cb->next;
			return 0;
		}

	} while (cb != NULL);

	/* Callback wasn't found. */
	return (-EINVAL);
}