/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>

#include "rte_ether.h"
#include "rte_ethdev.h"

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
static struct rte_eth_dev_data *rte_eth_dev_data;
static uint8_t eth_dev_last_created_port;

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove rx callbacks */
static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove tx callbacks */
static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
        {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
        {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
        {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
        {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
        {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
        {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
        {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
                rx_nombuf)},
};

#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
        {"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
                sizeof(rte_rxq_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_opackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
                sizeof(rte_txq_stats_strings[0]))


/**
 * The user application callback description.
 *
 * It contains the callback address to be registered by the user
 * application, the pointer to the callback's parameters, and the
 * event type.
 */
struct rte_eth_dev_callback {
        TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
        rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
        void *cb_arg;                           /**< Parameter for callback */
        void *ret_param;                        /**< Return parameter */
        enum rte_eth_event_type event;          /**< Interrupt event type */
        uint32_t active;                        /**< Callback is executing */
};
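
/*
 * A minimal usage sketch (illustrative, not compiled here) of how an
 * application hooks into this callback list through the public API in
 * rte_ethdev.h; the handler name is hypothetical and assumes the
 * four-argument rte_eth_dev_cb_fn signature of this release:
 *
 *   static int
 *   on_port_event(uint8_t port_id, enum rte_eth_event_type event,
 *                 void *cb_arg, void *ret_param)
 *   {
 *           RTE_SET_USED(cb_arg);
 *           RTE_SET_USED(ret_param);
 *           printf("port %u: event %d\n", port_id, (int)event);
 *           return 0;
 *   }
 *
 *   rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *                                 on_port_event, NULL);
 */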

enum {
        STAT_QMAP_TX = 0,
        STAT_QMAP_RX
};

uint8_t
rte_eth_find_next(uint8_t port_id)
{
        while (port_id < RTE_MAX_ETHPORTS &&
               rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED)
                port_id++;

        if (port_id >= RTE_MAX_ETHPORTS)
                return RTE_MAX_ETHPORTS;

        return port_id;
}
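
/*
 * A minimal sketch (illustrative only): rte_eth_find_next() backs the
 * RTE_ETH_FOREACH_DEV() macro from rte_ethdev.h used below; the
 * equivalent open-coded loop over attached ports looks like:
 *
 *   uint8_t p;
 *
 *   for (p = rte_eth_find_next(0); p < RTE_MAX_ETHPORTS;
 *        p = rte_eth_find_next(p + 1))
 *           printf("port %u is attached\n", p);
 */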

static void
rte_eth_dev_data_alloc(void)
{
        const unsigned flags = 0;
        const struct rte_memzone *mz;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
                                RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
                                rte_socket_id(), flags);
        } else
                mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
        if (mz == NULL)
                rte_panic("Cannot allocate memzone for ethernet port data\n");

        rte_eth_dev_data = mz->addr;
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                memset(rte_eth_dev_data, 0,
                                RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (rte_eth_devices[i].state == RTE_ETH_DEV_ATTACHED &&
                                rte_eth_devices[i].device) {
                        if (!strcmp(rte_eth_devices[i].device->name, name))
                                return &rte_eth_devices[i];
                }
        }
        return NULL;
}

static uint8_t
rte_eth_dev_find_free_port(void)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (rte_eth_devices[i].state == RTE_ETH_DEV_UNUSED)
                        return i;
        }
        return RTE_MAX_ETHPORTS;
}

static struct rte_eth_dev *
eth_dev_get(uint8_t port_id)
{
        struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

        eth_dev->data = &rte_eth_dev_data[port_id];
        eth_dev->state = RTE_ETH_DEV_ATTACHED;
        TAILQ_INIT(&(eth_dev->link_intr_cbs));

        eth_dev_last_created_port = port_id;

        return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
        uint8_t port_id;
        struct rte_eth_dev *eth_dev;

        port_id = rte_eth_dev_find_free_port();
        if (port_id == RTE_MAX_ETHPORTS) {
                RTE_PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
                return NULL;
        }

        if (rte_eth_dev_data == NULL)
                rte_eth_dev_data_alloc();

        if (rte_eth_dev_allocated(name) != NULL) {
                RTE_PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n",
                                name);
                return NULL;
        }

        memset(&rte_eth_dev_data[port_id], 0, sizeof(struct rte_eth_dev_data));
        eth_dev = eth_dev_get(port_id);
        snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
        eth_dev->data->port_id = port_id;
        eth_dev->data->mtu = ETHER_MTU;

        return eth_dev;
}

/*
 * Attach to a port already registered by the primary process, which
 * ensures that the same device gets the same port id in both the
 * primary and secondary processes.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
        uint8_t i;
        struct rte_eth_dev *eth_dev;

        if (rte_eth_dev_data == NULL)
                rte_eth_dev_data_alloc();

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (strcmp(rte_eth_dev_data[i].name, name) == 0)
                        break;
        }
        if (i == RTE_MAX_ETHPORTS) {
                RTE_PMD_DEBUG_TRACE(
                        "device %s is not driven by the primary process\n",
                        name);
                return NULL;
        }

        eth_dev = eth_dev_get(i);
        RTE_ASSERT(eth_dev->data->port_id == i);

        return eth_dev;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
        if (eth_dev == NULL)
                return -EINVAL;

        eth_dev->state = RTE_ETH_DEV_UNUSED;
        return 0;
}

int
rte_eth_dev_is_valid_port(uint8_t port_id)
{
        if (port_id >= RTE_MAX_ETHPORTS ||
            (rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
             rte_eth_devices[port_id].state != RTE_ETH_DEV_DEFERRED))
                return 0;
        else
                return 1;
}

int
rte_eth_dev_socket_id(uint8_t port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
        return rte_eth_devices[port_id].data->numa_node;
}

uint8_t
rte_eth_dev_count(void)
{
        uint8_t p;
        uint8_t count;

        count = 0;

        RTE_ETH_FOREACH_DEV(p)
                count++;

        return count;
}

int
rte_eth_dev_get_name_by_port(uint8_t port_id, char *name)
{
        const char *tmp;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        if (name == NULL) {
                RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
                return -EINVAL;
        }

        /* shouldn't check 'rte_eth_devices[i].data',
         * because it might be overwritten by VDEV PMD */
        tmp = rte_eth_devices[port_id].device->name;
        strcpy(name, tmp);
        return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id)
{
        int ret;
        int i;

        if (name == NULL) {
                RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
                return -EINVAL;
        }

        RTE_ETH_FOREACH_DEV(i) {
                if (!rte_eth_devices[i].device)
                        continue;

                ret = strncmp(name, rte_eth_devices[i].device->name,
                                strlen(name));
                if (ret == 0) {
                        *port_id = i;
                        return 0;
                }
        }
        return -ENODEV;
}

static int
rte_eth_dev_is_detachable(uint8_t port_id)
{
        uint32_t dev_flags;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        switch (rte_eth_devices[port_id].data->kdrv) {
        case RTE_KDRV_IGB_UIO:
        case RTE_KDRV_UIO_GENERIC:
        case RTE_KDRV_NIC_UIO:
        case RTE_KDRV_NONE:
        case RTE_KDRV_VFIO:
                break;
        default:
                return -ENOTSUP;
        }
        dev_flags = rte_eth_devices[port_id].data->dev_flags;
        if ((dev_flags & RTE_ETH_DEV_DETACHABLE) &&
                (!(dev_flags & RTE_ETH_DEV_BONDED_SLAVE)))
                return 0;
        else
                return 1;
}

/* attach the new device, then store port_id of the device */
int
rte_eth_dev_attach(const char *devargs, uint8_t *port_id)
{
        int ret = -1;
        int current = rte_eth_dev_count();
        char *name = NULL;
        char *args = NULL;

        if ((devargs == NULL) || (port_id == NULL)) {
                ret = -EINVAL;
                goto err;
        }

        /* parse devargs, then retrieve device name and args */
        if (rte_eal_parse_devargs_str(devargs, &name, &args))
                goto err;

        ret = rte_eal_dev_attach(name, args);
        if (ret < 0)
                goto err;

        /* no point looking at the port count if no port exists */
        if (!rte_eth_dev_count()) {
                RTE_LOG(ERR, EAL, "No port found for device (%s)\n", name);
                ret = -1;
                goto err;
        }

        /* if no new port was created, there is a bug here, since some
         * driver told us it did attach a device, but did not create a port.
         */
        if (current == rte_eth_dev_count()) {
                ret = -1;
                goto err;
        }

        *port_id = eth_dev_last_created_port;
        ret = 0;

err:
        free(name);
        free(args);
        return ret;
}
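
/*
 * A sketch of the expected call pattern (illustrative; the devargs
 * string is a hypothetical pcap vdev and error handling is elided):
 *
 *   uint8_t port_id;
 *
 *   if (rte_eth_dev_attach("net_pcap0,iface=eth0", &port_id) == 0)
 *           printf("attached as port %u\n", port_id);
 */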

/* detach the device, then store the name of the device */
int
rte_eth_dev_detach(uint8_t port_id, char *name)
{
        int ret = -1;

        if (name == NULL) {
                ret = -EINVAL;
                goto err;
        }

        /* FIXME: move this to eal, once device flags are relocated there */
        if (rte_eth_dev_is_detachable(port_id))
                goto err;

        snprintf(name, sizeof(rte_eth_devices[port_id].device->name),
                 "%s", rte_eth_devices[port_id].device->name);

        ret = rte_eal_dev_detach(rte_eth_devices[port_id].device);
        if (ret < 0)
                goto err;

        return 0;

err:
        return ret;
}

static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_rx_queues;
        void **rxq;
        unsigned i;

        if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
                                sizeof(dev->data->rx_queues[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (dev->data->rx_queues == NULL) {
                        dev->data->nb_rx_queues = 0;
                        return -(ENOMEM);
                }
        } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);
                rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (rxq == NULL)
                        return -(ENOMEM);
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(rxq + old_nb_queues, 0,
                                sizeof(rxq[0]) * new_qs);
                }

                dev->data->rx_queues = rxq;

        } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);

                rte_free(dev->data->rx_queues);
                dev->data->rx_queues = NULL;
        }
        dev->data->nb_rx_queues = nb_queues;
        return 0;
}

int
rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

        if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
                        " already started\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
}

int
rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

        if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
                        " already stopped\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
}

int
rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (tx_queue_id >= dev->data->nb_tx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

        if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
                        " already started\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
}

int
rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (tx_queue_id >= dev->data->nb_tx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

        if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
                        " already stopped\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
}

static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_tx_queues;
        void **txq;
        unsigned i;

        if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
                                                   sizeof(dev->data->tx_queues[0]) * nb_queues,
                                                   RTE_CACHE_LINE_SIZE);
                if (dev->data->tx_queues == NULL) {
                        dev->data->nb_tx_queues = 0;
                        return -(ENOMEM);
                }
        } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);
                txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
                                  RTE_CACHE_LINE_SIZE);
                if (txq == NULL)
                        return -ENOMEM;
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(txq + old_nb_queues, 0,
                               sizeof(txq[0]) * new_qs);
                }

                dev->data->tx_queues = txq;

        } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);

                rte_free(dev->data->tx_queues);
                dev->data->tx_queues = NULL;
        }
        dev->data->nb_tx_queues = nb_queues;
        return 0;
}

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
        switch (speed) {
        case ETH_SPEED_NUM_10M:
                return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
        case ETH_SPEED_NUM_100M:
                return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
        case ETH_SPEED_NUM_1G:
                return ETH_LINK_SPEED_1G;
        case ETH_SPEED_NUM_2_5G:
                return ETH_LINK_SPEED_2_5G;
        case ETH_SPEED_NUM_5G:
                return ETH_LINK_SPEED_5G;
        case ETH_SPEED_NUM_10G:
                return ETH_LINK_SPEED_10G;
        case ETH_SPEED_NUM_20G:
                return ETH_LINK_SPEED_20G;
        case ETH_SPEED_NUM_25G:
                return ETH_LINK_SPEED_25G;
        case ETH_SPEED_NUM_40G:
                return ETH_LINK_SPEED_40G;
        case ETH_SPEED_NUM_50G:
                return ETH_LINK_SPEED_50G;
        case ETH_SPEED_NUM_56G:
                return ETH_LINK_SPEED_56G;
        case ETH_SPEED_NUM_100G:
                return ETH_LINK_SPEED_100G;
        default:
                return 0;
        }
}
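
/*
 * A minimal usage sketch (illustrative only): applications typically OR
 * the returned flag into rte_eth_conf.link_speeds before configuring a
 * port, e.g. to request a fixed 10G full-duplex link:
 *
 *   struct rte_eth_conf conf;
 *
 *   memset(&conf, 0, sizeof(conf));
 *   conf.link_speeds = ETH_LINK_SPEED_FIXED |
 *           rte_eth_speed_bitflag(ETH_SPEED_NUM_10G, ETH_LINK_FULL_DUPLEX);
 */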

int
rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                      const struct rte_eth_conf *dev_conf)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        int diag;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
                RTE_PMD_DEBUG_TRACE(
                        "Number of RX queues requested (%u) is greater than max supported(%d)\n",
                        nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
                return -EINVAL;
        }

        if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
                RTE_PMD_DEBUG_TRACE(
                        "Number of TX queues requested (%u) is greater than max supported(%d)\n",
                        nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
                return -EINVAL;
        }

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

        if (dev->data->dev_started) {
                RTE_PMD_DEBUG_TRACE(
                    "port %d must be stopped to allow configuration\n", port_id);
                return -EBUSY;
        }

        /* Copy the dev_conf parameter into the dev structure */
        memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

        /*
         * Check that the numbers of RX and TX queues are not greater
         * than the maximum number of RX and TX queues supported by the
         * configured device.
         */
        (*dev->dev_ops->dev_infos_get)(dev, &dev_info);

        if (nb_rx_q == 0 && nb_tx_q == 0) {
                RTE_PMD_DEBUG_TRACE("ethdev port_id=%d both rx and tx queue cannot be 0\n", port_id);
                return -EINVAL;
        }

        if (nb_rx_q > dev_info.max_rx_queues) {
                RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
                                port_id, nb_rx_q, dev_info.max_rx_queues);
                return -EINVAL;
        }

        if (nb_tx_q > dev_info.max_tx_queues) {
                RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
                                port_id, nb_tx_q, dev_info.max_tx_queues);
                return -EINVAL;
        }

        /* Check that the device supports requested interrupts */
        if ((dev_conf->intr_conf.lsc == 1) &&
                (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
                        RTE_PMD_DEBUG_TRACE("driver %s does not support lsc\n",
                                        dev->device->driver->name);
                        return -EINVAL;
        }
        if ((dev_conf->intr_conf.rmv == 1) &&
            (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
                RTE_PMD_DEBUG_TRACE("driver %s does not support rmv\n",
                                    dev->device->driver->name);
                return -EINVAL;
        }

        /*
         * If jumbo frames are enabled, check that the maximum RX packet
         * length is supported by the configured device.
         */
        if (dev_conf->rxmode.jumbo_frame == 1) {
                if (dev_conf->rxmode.max_rx_pkt_len >
                    dev_info.max_rx_pktlen) {
                        RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
                                " > max valid value %u\n",
                                port_id,
                                (unsigned)dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned)dev_info.max_rx_pktlen);
                        return -EINVAL;
                } else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
                        RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
                                " < min valid value %u\n",
                                port_id,
                                (unsigned)dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned)ETHER_MIN_LEN);
                        return -EINVAL;
                }
        } else {
                if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
                        dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
                        /* Use default value */
                        dev->data->dev_conf.rxmode.max_rx_pkt_len =
                                                        ETHER_MAX_LEN;
        }

        /*
         * Setup new number of RX/TX queues and reconfigure device.
         */
        diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
        if (diag != 0) {
                RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
                                port_id, diag);
                return diag;
        }

        diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
        if (diag != 0) {
                RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
                                port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                return diag;
        }

        diag = (*dev->dev_ops->dev_configure)(dev);
        if (diag != 0) {
                RTE_PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
                                port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                rte_eth_dev_tx_queue_config(dev, 0);
                return diag;
        }

        return 0;
}
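
/*
 * A sketch of the canonical bring-up order built on this function
 * (illustrative; descriptor counts are arbitrary, "mbuf_pool" is a
 * hypothetical application-created mempool, and error handling is
 * elided):
 *
 *   struct rte_eth_conf conf;
 *
 *   memset(&conf, 0, sizeof(conf));
 *   rte_eth_dev_configure(port_id, 1, 1, &conf);
 *   rte_eth_rx_queue_setup(port_id, 0, 128,
 *           rte_eth_dev_socket_id(port_id), NULL, mbuf_pool);
 *   rte_eth_tx_queue_setup(port_id, 0, 512,
 *           rte_eth_dev_socket_id(port_id), NULL);
 *   rte_eth_dev_start(port_id);
 */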

void
_rte_eth_dev_reset(struct rte_eth_dev *dev)
{
        if (dev->data->dev_started) {
                RTE_PMD_DEBUG_TRACE(
                        "port %d must be stopped to allow reset\n",
                        dev->data->port_id);
                return;
        }

        rte_eth_dev_rx_queue_config(dev, 0);
        rte_eth_dev_tx_queue_config(dev, 0);

        memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
}

static void
rte_eth_dev_config_restore(uint8_t port_id)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        struct ether_addr *addr;
        uint16_t i;
        uint32_t pool = 0;
        uint64_t pool_mask;

        dev = &rte_eth_devices[port_id];

        rte_eth_dev_info_get(port_id, &dev_info);

        /* replay MAC address configuration including default MAC */
        addr = &dev->data->mac_addrs[0];
        if (*dev->dev_ops->mac_addr_set != NULL)
                (*dev->dev_ops->mac_addr_set)(dev, addr);
        else if (*dev->dev_ops->mac_addr_add != NULL)
                (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);

        if (*dev->dev_ops->mac_addr_add != NULL) {
                for (i = 1; i < dev_info.max_mac_addrs; i++) {
                        addr = &dev->data->mac_addrs[i];

                        /* skip zero address */
                        if (is_zero_ether_addr(addr))
                                continue;

                        pool = 0;
                        pool_mask = dev->data->mac_pool_sel[i];

                        do {
                                if (pool_mask & 1ULL)
                                        (*dev->dev_ops->mac_addr_add)(dev,
                                                addr, i, pool);
                                pool_mask >>= 1;
                                pool++;
                        } while (pool_mask);
                }
        }

        /* replay promiscuous configuration */
        if (rte_eth_promiscuous_get(port_id) == 1)
                rte_eth_promiscuous_enable(port_id);
        else if (rte_eth_promiscuous_get(port_id) == 0)
                rte_eth_promiscuous_disable(port_id);

        /* replay all multicast configuration */
        if (rte_eth_allmulticast_get(port_id) == 1)
                rte_eth_allmulticast_enable(port_id);
        else if (rte_eth_allmulticast_get(port_id) == 0)
                rte_eth_allmulticast_disable(port_id);
}

int
rte_eth_dev_start(uint8_t port_id)
{
        struct rte_eth_dev *dev;
        int diag;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

        if (dev->data->dev_started != 0) {
                RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
                        " already started\n",
                        port_id);
                return 0;
        }

        diag = (*dev->dev_ops->dev_start)(dev);
        if (diag == 0)
                dev->data->dev_started = 1;
        else
                return diag;

        rte_eth_dev_config_restore(port_id);

        if (dev->data->dev_conf.intr_conf.lsc == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
                (*dev->dev_ops->link_update)(dev, 0);
        }
        return 0;
}

void
rte_eth_dev_stop(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

        if (dev->data->dev_started == 0) {
                RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
                        " already stopped\n",
                        port_id);
                return;
        }

        dev->data->dev_started = 0;
        (*dev->dev_ops->dev_stop)(dev);
}

int
rte_eth_dev_set_link_up(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
        return (*dev->dev_ops->dev_set_link_up)(dev);
}

int
rte_eth_dev_set_link_down(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
        return (*dev->dev_ops->dev_set_link_down)(dev);
}

void
rte_eth_dev_close(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
        dev->data->dev_started = 0;
        (*dev->dev_ops->dev_close)(dev);

        dev->data->nb_rx_queues = 0;
        rte_free(dev->data->rx_queues);
        dev->data->rx_queues = NULL;
        dev->data->nb_tx_queues = 0;
        rte_free(dev->data->tx_queues);
        dev->data->tx_queues = NULL;
}

int
rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
                       uint16_t nb_rx_desc, unsigned int socket_id,
                       const struct rte_eth_rxconf *rx_conf,
                       struct rte_mempool *mp)
{
        int ret;
        uint32_t mbp_buf_size;
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        void **rxq;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
                return -EINVAL;
        }

        if (dev->data->dev_started) {
                RTE_PMD_DEBUG_TRACE(
                    "port %d must be stopped to allow configuration\n", port_id);
                return -EBUSY;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);

        /*
         * Check the size of the mbuf data buffer.
         * This value must be provided in the private data of the memory pool.
         * First check that the memory pool has a valid private data.
         */
        rte_eth_dev_info_get(port_id, &dev_info);
        if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
                RTE_PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
                                mp->name, (int) mp->private_data_size,
                                (int) sizeof(struct rte_pktmbuf_pool_private));
                return -ENOSPC;
        }
        mbp_buf_size = rte_pktmbuf_data_room_size(mp);

        if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
                RTE_PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
                                "(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
                                "=%d)\n",
                                mp->name,
                                (int)mbp_buf_size,
                                (int)(RTE_PKTMBUF_HEADROOM +
                                      dev_info.min_rx_bufsize),
                                (int)RTE_PKTMBUF_HEADROOM,
                                (int)dev_info.min_rx_bufsize);
                return -EINVAL;
        }

        if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
                        nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
                        nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {

                RTE_PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
                        "should be: <= %hu, >= %hu, and a multiple of %hu\n",
                        nb_rx_desc,
                        dev_info.rx_desc_lim.nb_max,
                        dev_info.rx_desc_lim.nb_min,
                        dev_info.rx_desc_lim.nb_align);
                return -EINVAL;
        }

        rxq = dev->data->rx_queues;
        if (rxq[rx_queue_id]) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
                                        -ENOTSUP);
                (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
                rxq[rx_queue_id] = NULL;
        }

        if (rx_conf == NULL)
                rx_conf = &dev_info.default_rxconf;

        ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
                                              socket_id, rx_conf, mp);
        if (!ret) {
                if (!dev->data->min_rx_buf_size ||
                    dev->data->min_rx_buf_size > mbp_buf_size)
                        dev->data->min_rx_buf_size = mbp_buf_size;
        }

        return ret;
}
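
/*
 * A sketch of the mempool side of the contract validated above
 * (illustrative; the pool name and sizes are arbitrary):
 * rte_pktmbuf_pool_create() fills in the rte_pktmbuf_pool_private area
 * this function checks, and the data room must cover
 * RTE_PKTMBUF_HEADROOM plus the device's min_rx_bufsize:
 *
 *   struct rte_mempool *mp = rte_pktmbuf_pool_create("rx_pool", 8192,
 *           256, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *
 *   rte_eth_rx_queue_setup(port_id, 0, 128,
 *           rte_eth_dev_socket_id(port_id), NULL, mp);
 */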

int
rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
                       uint16_t nb_tx_desc, unsigned int socket_id,
                       const struct rte_eth_txconf *tx_conf)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        void **txq;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (tx_queue_id >= dev->data->nb_tx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
                return -EINVAL;
        }

        if (dev->data->dev_started) {
                RTE_PMD_DEBUG_TRACE(
                    "port %d must be stopped to allow configuration\n", port_id);
                return -EBUSY;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);

        rte_eth_dev_info_get(port_id, &dev_info);

        if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
            nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
            nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
                RTE_PMD_DEBUG_TRACE("Invalid value for nb_tx_desc(=%hu), "
                                "should be: <= %hu, >= %hu, and a multiple of %hu\n",
                                nb_tx_desc,
                                dev_info.tx_desc_lim.nb_max,
                                dev_info.tx_desc_lim.nb_min,
                                dev_info.tx_desc_lim.nb_align);
                return -EINVAL;
        }

        txq = dev->data->tx_queues;
        if (txq[tx_queue_id]) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
                                        -ENOTSUP);
                (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
                txq[tx_queue_id] = NULL;
        }

        if (tx_conf == NULL)
                tx_conf = &dev_info.default_txconf;

        return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
                                               socket_id, tx_conf);
}

void
rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
                void *userdata __rte_unused)
{
        unsigned i;

        for (i = 0; i < unsent; i++)
                rte_pktmbuf_free(pkts[i]);
}

void
rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
                void *userdata)
{
        uint64_t *count = userdata;
        unsigned i;

        for (i = 0; i < unsent; i++)
                rte_pktmbuf_free(pkts[i]);

        *count += unsent;
}

int
rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
                buffer_tx_error_fn cbfn, void *userdata)
{
        buffer->error_callback = cbfn;
        buffer->error_userdata = userdata;
        return 0;
}

int
rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
{
        int ret = 0;

        if (buffer == NULL)
                return -EINVAL;

        buffer->size = size;
        if (buffer->error_callback == NULL) {
                ret = rte_eth_tx_buffer_set_err_callback(
                        buffer, rte_eth_tx_buffer_drop_callback, NULL);
        }

        return ret;
}
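
/*
 * A buffered-TX usage sketch (illustrative; TX_BATCH and "pkt" are
 * hypothetical): the error callback installed here is the counting
 * variant defined above, so dropped packets accumulate in a counter
 * owned by the application:
 *
 *   uint64_t drops = 0;
 *   struct rte_eth_dev_tx_buffer *buf = rte_zmalloc_socket("tx_buf",
 *           RTE_ETH_TX_BUFFER_SIZE(TX_BATCH), 0, rte_socket_id());
 *
 *   rte_eth_tx_buffer_init(buf, TX_BATCH);
 *   rte_eth_tx_buffer_set_err_callback(buf,
 *           rte_eth_tx_buffer_count_callback, &drops);
 *   rte_eth_tx_buffer(port_id, 0, buf, pkt);
 *   rte_eth_tx_buffer_flush(port_id, 0, buf);
 */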
1200
1201 int
1202 rte_eth_tx_done_cleanup(uint8_t port_id, uint16_t queue_id, uint32_t free_cnt)
1203 {
1204         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1205
1206         /* Validate Input Data. Bail if not valid or not supported. */
1207         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1208         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
1209
1210         /* Call driver to free pending mbufs. */
1211         return (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
1212                         free_cnt);
1213 }
1214
1215 void
1216 rte_eth_promiscuous_enable(uint8_t port_id)
1217 {
1218         struct rte_eth_dev *dev;
1219
1220         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1221         dev = &rte_eth_devices[port_id];
1222
1223         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
1224         (*dev->dev_ops->promiscuous_enable)(dev);
1225         dev->data->promiscuous = 1;
1226 }
1227
1228 void
1229 rte_eth_promiscuous_disable(uint8_t port_id)
1230 {
1231         struct rte_eth_dev *dev;
1232
1233         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1234         dev = &rte_eth_devices[port_id];
1235
1236         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
1237         dev->data->promiscuous = 0;
1238         (*dev->dev_ops->promiscuous_disable)(dev);
1239 }
1240
1241 int
1242 rte_eth_promiscuous_get(uint8_t port_id)
1243 {
1244         struct rte_eth_dev *dev;
1245
1246         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1247
1248         dev = &rte_eth_devices[port_id];
1249         return dev->data->promiscuous;
1250 }
1251
1252 void
1253 rte_eth_allmulticast_enable(uint8_t port_id)
1254 {
1255         struct rte_eth_dev *dev;
1256
1257         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1258         dev = &rte_eth_devices[port_id];
1259
1260         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
1261         (*dev->dev_ops->allmulticast_enable)(dev);
1262         dev->data->all_multicast = 1;
1263 }
1264
1265 void
1266 rte_eth_allmulticast_disable(uint8_t port_id)
1267 {
1268         struct rte_eth_dev *dev;
1269
1270         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1271         dev = &rte_eth_devices[port_id];
1272
1273         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
1274         dev->data->all_multicast = 0;
1275         (*dev->dev_ops->allmulticast_disable)(dev);
1276 }
1277
1278 int
1279 rte_eth_allmulticast_get(uint8_t port_id)
1280 {
1281         struct rte_eth_dev *dev;
1282
1283         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1284
1285         dev = &rte_eth_devices[port_id];
1286         return dev->data->all_multicast;
1287 }
1288
1289 static inline int
1290 rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
1291                                 struct rte_eth_link *link)
1292 {
1293         struct rte_eth_link *dst = link;
1294         struct rte_eth_link *src = &(dev->data->dev_link);
1295
1296         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
1297                                         *(uint64_t *)src) == 0)
1298                 return -1;
1299
1300         return 0;
1301 }
1302
1303 void
1304 rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
1305 {
1306         struct rte_eth_dev *dev;
1307
1308         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1309         dev = &rte_eth_devices[port_id];
1310
1311         if (dev->data->dev_conf.intr_conf.lsc != 0)
1312                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1313         else {
1314                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1315                 (*dev->dev_ops->link_update)(dev, 1);
1316                 *eth_link = dev->data->dev_link;
1317         }
1318 }
1319
1320 void
1321 rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
1322 {
1323         struct rte_eth_dev *dev;
1324
1325         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1326         dev = &rte_eth_devices[port_id];
1327
1328         if (dev->data->dev_conf.intr_conf.lsc != 0)
1329                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1330         else {
1331                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1332                 (*dev->dev_ops->link_update)(dev, 0);
1333                 *eth_link = dev->data->dev_link;
1334         }
1335 }
1336
1337 int
1338 rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
1339 {
1340         struct rte_eth_dev *dev;
1341
1342         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1343
1344         dev = &rte_eth_devices[port_id];
1345         memset(stats, 0, sizeof(*stats));
1346
1347         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1348         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1349         (*dev->dev_ops->stats_get)(dev, stats);
1350         return 0;
1351 }
1352
1353 void
1354 rte_eth_stats_reset(uint8_t port_id)
1355 {
1356         struct rte_eth_dev *dev;
1357
1358         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1359         dev = &rte_eth_devices[port_id];
1360
1361         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
1362         (*dev->dev_ops->stats_reset)(dev);
1363         dev->data->rx_mbuf_alloc_failed = 0;
1364 }
1365
1366 static int
1367 get_xstats_count(uint8_t port_id)
1368 {
1369         struct rte_eth_dev *dev;
1370         int count;
1371
1372         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1373         dev = &rte_eth_devices[port_id];
1374         if (dev->dev_ops->xstats_get_names_by_id != NULL) {
1375                 count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
1376                                 NULL, 0);
1377                 if (count < 0)
1378                         return count;
1379         }
1380         if (dev->dev_ops->xstats_get_names != NULL) {
1381                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
1382                 if (count < 0)
1383                         return count;
1384         } else
1385                 count = 0;
1386
1387         count += RTE_NB_STATS;
1388         count += RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS) *
1389                  RTE_NB_RXQ_STATS;
1390         count += RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS) *
1391                  RTE_NB_TXQ_STATS;
1392         return count;
1393 }
1394
1395 int
1396 rte_eth_xstats_get_id_by_name(uint8_t port_id, const char *xstat_name,
1397                 uint64_t *id)
1398 {
1399         int cnt_xstats, idx_xstat;
1400
1401         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1402
1403         if (!id) {
1404                 RTE_PMD_DEBUG_TRACE("Error: id pointer is NULL\n");
1405                 return -ENOMEM;
1406         }
1407
1408         if (!xstat_name) {
1409                 RTE_PMD_DEBUG_TRACE("Error: xstat_name pointer is NULL\n");
1410                 return -ENOMEM;
1411         }
1412
1413         /* Get count */
1414         cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
1415         if (cnt_xstats  < 0) {
1416                 RTE_PMD_DEBUG_TRACE("Error: Cannot get count of xstats\n");
1417                 return -ENODEV;
1418         }
1419
1420         /* Get id-name lookup table */
1421         struct rte_eth_xstat_name xstats_names[cnt_xstats];
1422
1423         if (cnt_xstats != rte_eth_xstats_get_names_by_id(
1424                         port_id, xstats_names, cnt_xstats, NULL)) {
1425                 RTE_PMD_DEBUG_TRACE("Error: Cannot get xstats lookup\n");
1426                 return -1;
1427         }
1428
1429         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
1430                 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
1431                         *id = idx_xstat;
1432                         return 0;
1433                 };
1434         }
1435
1436         return -EINVAL;
1437 }
1438
1439 int
1440 rte_eth_xstats_get_names_by_id(uint8_t port_id,
1441         struct rte_eth_xstat_name *xstats_names, unsigned int size,
1442         uint64_t *ids)
1443 {
1444         /* Get all xstats */
1445         if (!ids) {
1446                 struct rte_eth_dev *dev;
1447                 int cnt_used_entries;
1448                 int cnt_expected_entries;
1449                 int cnt_driver_entries;
1450                 uint32_t idx, id_queue;
1451                 uint16_t num_q;
1452
1453                 cnt_expected_entries = get_xstats_count(port_id);
1454                 if (xstats_names == NULL || cnt_expected_entries < 0 ||
1455                                 (int)size < cnt_expected_entries)
1456                         return cnt_expected_entries;
1457
1458                 /* port_id checked in get_xstats_count() */
1459                 dev = &rte_eth_devices[port_id];
1460                 cnt_used_entries = 0;
1461
1462                 for (idx = 0; idx < RTE_NB_STATS; idx++) {
1463                         snprintf(xstats_names[cnt_used_entries].name,
1464                                 sizeof(xstats_names[0].name),
1465                                 "%s", rte_stats_strings[idx].name);
1466                         cnt_used_entries++;
1467                 }
1468                 num_q = RTE_MIN(dev->data->nb_rx_queues,
1469                                 RTE_ETHDEV_QUEUE_STAT_CNTRS);
1470                 for (id_queue = 0; id_queue < num_q; id_queue++) {
1471                         for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
1472                                 snprintf(xstats_names[cnt_used_entries].name,
1473                                         sizeof(xstats_names[0].name),
1474                                         "rx_q%u%s",
1475                                         id_queue,
1476                                         rte_rxq_stats_strings[idx].name);
1477                                 cnt_used_entries++;
1478                         }
1479
1480                 }
1481                 num_q = RTE_MIN(dev->data->nb_tx_queues,
1482                                 RTE_ETHDEV_QUEUE_STAT_CNTRS);
1483                 for (id_queue = 0; id_queue < num_q; id_queue++) {
1484                         for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
1485                                 snprintf(xstats_names[cnt_used_entries].name,
1486                                         sizeof(xstats_names[0].name),
1487                                         "tx_q%u%s",
1488                                         id_queue,
1489                                         rte_txq_stats_strings[idx].name);
1490                                 cnt_used_entries++;
1491                         }
1492                 }
1493
1494                 if (dev->dev_ops->xstats_get_names_by_id != NULL) {
1495                         /* If there are any driver-specific xstats, append them
1496                          * to end of list.
1497                          */
1498                         cnt_driver_entries =
1499                                 (*dev->dev_ops->xstats_get_names_by_id)(
1500                                 dev,
1501                                 xstats_names + cnt_used_entries,
1502                                 NULL,
1503                                 size - cnt_used_entries);
1504                         if (cnt_driver_entries < 0)
1505                                 return cnt_driver_entries;
1506                         cnt_used_entries += cnt_driver_entries;
1507
1508                 } else if (dev->dev_ops->xstats_get_names != NULL) {
1509                         /* If there are any driver-specific xstats, append them
1510                          * to end of list.
1511                          */
1512                         cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
1513                                 dev,
1514                                 xstats_names + cnt_used_entries,
1515                                 size - cnt_used_entries);
1516                         if (cnt_driver_entries < 0)
1517                                 return cnt_driver_entries;
1518                         cnt_used_entries += cnt_driver_entries;
1519                 }
1520
1521                 return cnt_used_entries;
1522         }
1523         /* Get only xstats given by IDS */
1524         else {
                uint16_t len, i;
                struct rte_eth_xstat_name *xstats_names_copy;

                RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

                len = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
1529
1530                 xstats_names_copy =
1531                                 malloc(sizeof(struct rte_eth_xstat_name) * len);
                if (!xstats_names_copy) {
                        RTE_PMD_DEBUG_TRACE(
                             "ERROR: can't allocate memory for xstats_names_copy\n");
                        return -1;
                }
1538
1539                 rte_eth_xstats_get_names_by_id(port_id, xstats_names_copy,
1540                                 len, NULL);
1541
1542                 for (i = 0; i < size; i++) {
                        if (ids[i] >= len) {
                                RTE_PMD_DEBUG_TRACE(
                                        "ERROR: id value isn't valid\n");
                                free(xstats_names_copy);
                                return -1;
                        }
1548                         strcpy(xstats_names[i].name,
1549                                         xstats_names_copy[ids[i]].name);
1550                 }
1551                 free(xstats_names_copy);
1552                 return size;
1553         }
1554 }
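
/*
 * Usage sketch (illustrative only, not part of the original file): fetch
 * the names of two xstats selected by id, assuming a valid port_id and
 * hypothetical ids 0 and 3.  A first call with a NULL buffer returns the
 * total number of available names, which bounds the valid ids.
 *
 *      uint64_t ids[] = {0, 3};
 *      struct rte_eth_xstat_name names[2];
 *      int total = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
 *
 *      if (total > 3)
 *              rte_eth_xstats_get_names_by_id(port_id, names, 2, ids);
 */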
1555
1556 int
1557 rte_eth_xstats_get_names(uint8_t port_id,
1558         struct rte_eth_xstat_name *xstats_names,
1559         unsigned int size)
1560 {
1561         struct rte_eth_dev *dev;
1562         int cnt_used_entries;
1563         int cnt_expected_entries;
1564         int cnt_driver_entries;
1565         uint32_t idx, id_queue;
1566         uint16_t num_q;
1567
1568         cnt_expected_entries = get_xstats_count(port_id);
1569         if (xstats_names == NULL || cnt_expected_entries < 0 ||
1570                         (int)size < cnt_expected_entries)
1571                 return cnt_expected_entries;
1572
1573         /* port_id checked in get_xstats_count() */
1574         dev = &rte_eth_devices[port_id];
1575         cnt_used_entries = 0;
1576
1577         for (idx = 0; idx < RTE_NB_STATS; idx++) {
1578                 snprintf(xstats_names[cnt_used_entries].name,
1579                         sizeof(xstats_names[0].name),
1580                         "%s", rte_stats_strings[idx].name);
1581                 cnt_used_entries++;
1582         }
1583         num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1584         for (id_queue = 0; id_queue < num_q; id_queue++) {
1585                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
1586                         snprintf(xstats_names[cnt_used_entries].name,
1587                                 sizeof(xstats_names[0].name),
1588                                 "rx_q%u%s",
1589                                 id_queue, rte_rxq_stats_strings[idx].name);
1590                         cnt_used_entries++;
1591                 }
1593         }
1594         num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1595         for (id_queue = 0; id_queue < num_q; id_queue++) {
1596                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
1597                         snprintf(xstats_names[cnt_used_entries].name,
1598                                 sizeof(xstats_names[0].name),
1599                                 "tx_q%u%s",
1600                                 id_queue, rte_txq_stats_strings[idx].name);
1601                         cnt_used_entries++;
1602                 }
1603         }
1604
1605         if (dev->dev_ops->xstats_get_names != NULL) {
                /* If there are any driver-specific xstats, append
                 * them to the end of the list.
                 */
1609                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
1610                         dev,
1611                         xstats_names + cnt_used_entries,
1612                         size - cnt_used_entries);
1613                 if (cnt_driver_entries < 0)
1614                         return cnt_driver_entries;
1615                 cnt_used_entries += cnt_driver_entries;
1616         }
1617
1618         return cnt_used_entries;
1619 }
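
/*
 * Usage sketch (illustrative only): the two-call sizing pattern this
 * function supports.  With a NULL buffer (or a size smaller than
 * required) it returns the expected number of entries instead of
 * filling anything in.
 *
 *      int len = rte_eth_xstats_get_names(port_id, NULL, 0);
 *      struct rte_eth_xstat_name *names;
 *
 *      if (len > 0) {
 *              names = malloc(sizeof(*names) * len);
 *              if (names != NULL)
 *                      rte_eth_xstats_get_names(port_id, names, len);
 *      }
 */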
1620
1621 /* retrieve ethdev extended statistics */
1622 int
1623 rte_eth_xstats_get_by_id(uint8_t port_id, const uint64_t *ids, uint64_t *values,
1624         unsigned int n)
1625 {
        /* If all xstats are needed (no id filter was given) */
1627         if (!ids) {
1628                 struct rte_eth_stats eth_stats;
1629                 struct rte_eth_dev *dev;
1630                 unsigned int count = 0, i, q;
1631                 signed int xcount = 0;
1632                 uint64_t val, *stats_ptr;
1633                 uint16_t nb_rxqs, nb_txqs;
1634
1635                 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1636                 dev = &rte_eth_devices[port_id];
1637
1638                 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues,
1639                                 RTE_ETHDEV_QUEUE_STAT_CNTRS);
1640                 nb_txqs = RTE_MIN(dev->data->nb_tx_queues,
1641                                 RTE_ETHDEV_QUEUE_STAT_CNTRS);
1642
1643                 /* Return generic statistics */
1644                 count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
1645                         (nb_txqs * RTE_NB_TXQ_STATS);
1646
1648                 /* implemented by the driver */
1649                 if (dev->dev_ops->xstats_get_by_id != NULL) {
1650                         /* Retrieve the xstats from the driver at the end of the
1651                          * xstats struct. Retrieve all xstats.
1652                          */
1653                         xcount = (*dev->dev_ops->xstats_get_by_id)(dev,
1654                                         NULL,
1655                                         values ? values + count : NULL,
1656                                         (n > count) ? n - count : 0);
1657
1658                         if (xcount < 0)
1659                                 return xcount;
1660                 /* implemented by the driver */
1661                 } else if (dev->dev_ops->xstats_get != NULL) {
                        /* Retrieve the xstats from the driver at the end of
                         * the xstats array.  Retrieve all xstats.
                         * Fallback for PMDs that do not implement
                         * xstats_get_by_id.
                         */
1666                         unsigned int size = (n > count) ? n - count : 1;
1667                         struct rte_eth_xstat xstats[size];
1668
1669                         xcount = (*dev->dev_ops->xstats_get)(dev,
1670                                         values ? xstats : NULL, size);
1671
1672                         if (xcount < 0)
1673                                 return xcount;
1674
1675                         if (values != NULL)
1676                                 for (i = 0 ; i < (unsigned int)xcount; i++)
1677                                         values[i + count] = xstats[i].value;
1678                 }
1679
1680                 if (n < count + xcount || values == NULL)
1681                         return count + xcount;
1682
1683                 /* now fill the xstats structure */
1684                 count = 0;
1685                 rte_eth_stats_get(port_id, &eth_stats);
1686
1687                 /* global stats */
1688                 for (i = 0; i < RTE_NB_STATS; i++) {
1689                         stats_ptr = RTE_PTR_ADD(&eth_stats,
1690                                                 rte_stats_strings[i].offset);
1691                         val = *stats_ptr;
1692                         values[count++] = val;
1693                 }
1694
1695                 /* per-rxq stats */
1696                 for (q = 0; q < nb_rxqs; q++) {
1697                         for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
1698                                 stats_ptr = RTE_PTR_ADD(&eth_stats,
1699                                             rte_rxq_stats_strings[i].offset +
1700                                             q * sizeof(uint64_t));
1701                                 val = *stats_ptr;
1702                                 values[count++] = val;
1703                         }
1704                 }
1705
1706                 /* per-txq stats */
1707                 for (q = 0; q < nb_txqs; q++) {
1708                         for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
1709                                 stats_ptr = RTE_PTR_ADD(&eth_stats,
1710                                             rte_txq_stats_strings[i].offset +
1711                                             q * sizeof(uint64_t));
1712                                 val = *stats_ptr;
1713                                 values[count++] = val;
1714                         }
1715                 }
1716
1717                 return count + xcount;
1718         }
        /* Only the xstats selected by the ids array are needed */
1720         else {
                uint16_t i, size;
                uint64_t *values_copy;

                RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

                size = rte_eth_xstats_get_by_id(port_id, NULL, NULL, 0);
1725
1726                 values_copy = malloc(sizeof(*values_copy) * size);
1727                 if (!values_copy) {
1728                         RTE_PMD_DEBUG_TRACE(
1729                             "ERROR: can't allocate memory for values_copy\n");
1730                         return -1;
1731                 }
1732
1733                 rte_eth_xstats_get_by_id(port_id, NULL, values_copy, size);
1734
1735                 for (i = 0; i < n; i++) {
                        if (ids[i] >= size) {
                                RTE_PMD_DEBUG_TRACE(
                                        "ERROR: id value isn't valid\n");
                                free(values_copy);
                                return -1;
                        }
1741                         values[i] = values_copy[ids[i]];
1742                 }
1743                 free(values_copy);
1744                 return n;
1745         }
1746 }
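
/*
 * Usage sketch (illustrative only): read two counters by id, assuming a
 * valid port_id and hypothetical ids.  Ids index the full xstats list,
 * so they must be below the count returned by a size query
 * (ids == NULL, n == 0).
 *
 *      uint64_t ids[] = {0, 5};
 *      uint64_t values[2];
 *
 *      if (rte_eth_xstats_get_by_id(port_id, ids, values, 2) == 2)
 *              printf("stat0=%" PRIu64 " stat5=%" PRIu64 "\n",
 *                     values[0], values[1]);
 */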
1747
1748 int
1749 rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstat *xstats,
1750         unsigned int n)
1751 {
1752         struct rte_eth_stats eth_stats;
1753         struct rte_eth_dev *dev;
1754         unsigned int count = 0, i, q;
1755         signed int xcount = 0;
1756         uint64_t val, *stats_ptr;
1757         uint16_t nb_rxqs, nb_txqs;
1758
1759         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1760
1761         dev = &rte_eth_devices[port_id];
1762
1763         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1764         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1765
1766         /* Return generic statistics */
1767         count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
1768                 (nb_txqs * RTE_NB_TXQ_STATS);
1769
1770         /* implemented by the driver */
1771         if (dev->dev_ops->xstats_get != NULL) {
1772                 /* Retrieve the xstats from the driver at the end of the
1773                  * xstats struct.
1774                  */
1775                 xcount = (*dev->dev_ops->xstats_get)(dev,
1776                                      xstats ? xstats + count : NULL,
1777                                      (n > count) ? n - count : 0);
1778
1779                 if (xcount < 0)
1780                         return xcount;
1781         }
1782
1783         if (n < count + xcount || xstats == NULL)
1784                 return count + xcount;
1785
1786         /* now fill the xstats structure */
1787         count = 0;
1788         rte_eth_stats_get(port_id, &eth_stats);
1789
1790         /* global stats */
1791         for (i = 0; i < RTE_NB_STATS; i++) {
1792                 stats_ptr = RTE_PTR_ADD(&eth_stats,
1793                                         rte_stats_strings[i].offset);
1794                 val = *stats_ptr;
1795                 xstats[count++].value = val;
1796         }
1797
1798         /* per-rxq stats */
1799         for (q = 0; q < nb_rxqs; q++) {
1800                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
1801                         stats_ptr = RTE_PTR_ADD(&eth_stats,
1802                                         rte_rxq_stats_strings[i].offset +
1803                                         q * sizeof(uint64_t));
1804                         val = *stats_ptr;
1805                         xstats[count++].value = val;
1806                 }
1807         }
1808
1809         /* per-txq stats */
1810         for (q = 0; q < nb_txqs; q++) {
1811                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
1812                         stats_ptr = RTE_PTR_ADD(&eth_stats,
1813                                         rte_txq_stats_strings[i].offset +
1814                                         q * sizeof(uint64_t));
1815                         val = *stats_ptr;
1816                         xstats[count++].value = val;
1817                 }
1818         }
1819
1820         for (i = 0; i < count; i++)
1821                 xstats[i].id = i;
1822         /* add an offset to driver-specific stats */
1823         for ( ; i < count + xcount; i++)
1824                 xstats[i].id += count;
1825
1826         return count + xcount;
1827 }
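
/*
 * Usage sketch (illustrative only, error handling elided): dump all
 * xstats with their names.  Names and values share the same indexing,
 * so names[i] labels stats[i].
 *
 *      int i, len = rte_eth_xstats_get(port_id, NULL, 0);
 *      struct rte_eth_xstat *stats = malloc(sizeof(*stats) * len);
 *      struct rte_eth_xstat_name *names = malloc(sizeof(*names) * len);
 *
 *      if (stats != NULL && names != NULL &&
 *          rte_eth_xstats_get(port_id, stats, len) == len &&
 *          rte_eth_xstats_get_names(port_id, names, len) == len)
 *              for (i = 0; i < len; i++)
 *                      printf("%s: %" PRIu64 "\n",
 *                             names[i].name, stats[i].value);
 */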
1828
1829 /* reset ethdev extended statistics */
1830 void
1831 rte_eth_xstats_reset(uint8_t port_id)
1832 {
1833         struct rte_eth_dev *dev;
1834
1835         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1836         dev = &rte_eth_devices[port_id];
1837
1838         /* implemented by the driver */
1839         if (dev->dev_ops->xstats_reset != NULL) {
1840                 (*dev->dev_ops->xstats_reset)(dev);
1841                 return;
1842         }
1843
1844         /* fallback to default */
1845         rte_eth_stats_reset(port_id);
1846 }
1847
1848 static int
1849 set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
1850                 uint8_t is_rx)
1851 {
1852         struct rte_eth_dev *dev;
1853
1854         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1855
1856         dev = &rte_eth_devices[port_id];
1857
1858         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
1859         return (*dev->dev_ops->queue_stats_mapping_set)
1860                         (dev, queue_id, stat_idx, is_rx);
1861 }
1862
1864 int
1865 rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
1866                 uint8_t stat_idx)
1867 {
1868         return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
1869                         STAT_QMAP_TX);
1870 }
1871
1873 int
1874 rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
1875                 uint8_t stat_idx)
1876 {
1877         return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
1878                         STAT_QMAP_RX);
1879 }
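
/*
 * Usage sketch (illustrative only): route the counters of RX queue 2
 * into per-queue stats register 0, so that q_ipackets[0]/q_ibytes[0]
 * in struct rte_eth_stats reflect that queue.  Only the first
 * RTE_ETHDEV_QUEUE_STAT_CNTRS registers exist, and not every PMD
 * implements the mapping (-ENOTSUP).
 *
 *      rte_eth_dev_set_rx_queue_stats_mapping(port_id, 2, 0);
 */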
1880
1881 int
1882 rte_eth_dev_fw_version_get(uint8_t port_id, char *fw_version, size_t fw_size)
1883 {
1884         struct rte_eth_dev *dev;
1885
1886         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1887         dev = &rte_eth_devices[port_id];
1888
1889         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
1890         return (*dev->dev_ops->fw_version_get)(dev, fw_version, fw_size);
1891 }
1892
1893 void
1894 rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
1895 {
1896         struct rte_eth_dev *dev;
1897         const struct rte_eth_desc_lim lim = {
1898                 .nb_max = UINT16_MAX,
1899                 .nb_min = 0,
1900                 .nb_align = 1,
1901         };
1902
1903         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1904         dev = &rte_eth_devices[port_id];
1905
1906         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
1907         dev_info->rx_desc_lim = lim;
1908         dev_info->tx_desc_lim = lim;
1909
1910         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
1911         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
1912         dev_info->driver_name = dev->device->driver->name;
1913         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
1914         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
1915 }
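
/*
 * Usage sketch (illustrative only): clamp an application's desired
 * queue count (hypothetical variable nb_rxq) to the device limits
 * before calling rte_eth_dev_configure().
 *
 *      struct rte_eth_dev_info dev_info;
 *
 *      rte_eth_dev_info_get(port_id, &dev_info);
 *      if (nb_rxq > dev_info.max_rx_queues)
 *              nb_rxq = dev_info.max_rx_queues;
 */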
1916
1917 int
1918 rte_eth_dev_get_supported_ptypes(uint8_t port_id, uint32_t ptype_mask,
1919                                  uint32_t *ptypes, int num)
1920 {
1921         int i, j;
1922         struct rte_eth_dev *dev;
1923         const uint32_t *all_ptypes;
1924
1925         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1926         dev = &rte_eth_devices[port_id];
1927         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
1928         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
1929
1930         if (!all_ptypes)
1931                 return 0;
1932
1933         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
1934                 if (all_ptypes[i] & ptype_mask) {
1935                         if (j < num)
1936                                 ptypes[j] = all_ptypes[i];
1937                         j++;
1938                 }
1939
1940         return j;
1941 }
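
/*
 * Usage sketch (illustrative only): count-then-fetch.  A first call
 * with num == 0 returns how many ptypes match the mask without
 * writing anywhere.
 *
 *      int n = rte_eth_dev_get_supported_ptypes(port_id,
 *                      RTE_PTYPE_L4_MASK, NULL, 0);
 *
 *      if (n > 0) {
 *              uint32_t *ptypes = malloc(sizeof(*ptypes) * n);
 *
 *              if (ptypes != NULL)
 *                      rte_eth_dev_get_supported_ptypes(port_id,
 *                                      RTE_PTYPE_L4_MASK, ptypes, n);
 *      }
 */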
1942
1943 void
1944 rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
1945 {
1946         struct rte_eth_dev *dev;
1947
1948         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1949         dev = &rte_eth_devices[port_id];
1950         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
1951 }
1952
1954 int
1955 rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu)
1956 {
1957         struct rte_eth_dev *dev;
1958
1959         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1960
1961         dev = &rte_eth_devices[port_id];
1962         *mtu = dev->data->mtu;
1963         return 0;
1964 }
1965
1966 int
1967 rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu)
1968 {
1969         int ret;
1970         struct rte_eth_dev *dev;
1971
1972         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1973         dev = &rte_eth_devices[port_id];
1974         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
1975
1976         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
1977         if (!ret)
1978                 dev->data->mtu = mtu;
1979
1980         return ret;
1981 }
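
/*
 * Usage sketch (illustrative only): try a jumbo MTU and fall back to
 * the standard Ethernet MTU if the PMD rejects it.  The cached
 * dev->data->mtu is only updated when the driver accepts the value.
 *
 *      if (rte_eth_dev_set_mtu(port_id, 9000) != 0)
 *              rte_eth_dev_set_mtu(port_id, ETHER_MTU);
 */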
1982
1983 int
1984 rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
1985 {
1986         struct rte_eth_dev *dev;
1987         int ret;
1988
1989         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1990         dev = &rte_eth_devices[port_id];
1991         if (!(dev->data->dev_conf.rxmode.hw_vlan_filter)) {
1992                 RTE_PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
1993                 return -ENOSYS;
1994         }
1995
1996         if (vlan_id > 4095) {
1997                 RTE_PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
1998                                 port_id, (unsigned) vlan_id);
1999                 return -EINVAL;
2000         }
2001         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
2002
2003         ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
2004         if (ret == 0) {
2005                 struct rte_vlan_filter_conf *vfc;
2006                 int vidx;
2007                 int vbit;
2008
2009                 vfc = &dev->data->vlan_filter_conf;
2010                 vidx = vlan_id / 64;
2011                 vbit = vlan_id % 64;
2012
2013                 if (on)
2014                         vfc->ids[vidx] |= UINT64_C(1) << vbit;
2015                 else
2016                         vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
2017         }
2018
2019         return ret;
2020 }
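
/*
 * Usage sketch (illustrative only): accept VLAN 100 on a port whose
 * rxmode was configured with hw_vlan_filter = 1.  On success the
 * shadow table above records bit (vlan_id % 64) of word (vlan_id / 64).
 *
 *      if (rte_eth_dev_vlan_filter(port_id, 100, 1) != 0)
 *              printf("VLAN filtering unavailable on port %u\n", port_id);
 */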
2021
2022 int
2023 rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on)
2024 {
2025         struct rte_eth_dev *dev;
2026
2027         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2028         dev = &rte_eth_devices[port_id];
2029         if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid rx_queue_id=%u\n", rx_queue_id);
2031                 return -EINVAL;
2032         }
2033
2034         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
2035         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
2036
2037         return 0;
2038 }
2039
2040 int
2041 rte_eth_dev_set_vlan_ether_type(uint8_t port_id,
2042                                 enum rte_vlan_type vlan_type,
2043                                 uint16_t tpid)
2044 {
2045         struct rte_eth_dev *dev;
2046
2047         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2048         dev = &rte_eth_devices[port_id];
2049         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
2050
2051         return (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, tpid);
2052 }
2053
2054 int
2055 rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
2056 {
2057         struct rte_eth_dev *dev;
2058         int ret = 0;
2059         int mask = 0;
2060         int cur, org = 0;
2061
2062         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2063         dev = &rte_eth_devices[port_id];
2064
        /* check which options were changed by the application */
2066         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
2067         org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
2068         if (cur != org) {
2069                 dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
2070                 mask |= ETH_VLAN_STRIP_MASK;
2071         }
2072
2073         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
2074         org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
2075         if (cur != org) {
2076                 dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
2077                 mask |= ETH_VLAN_FILTER_MASK;
2078         }
2079
2080         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
2081         org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
2082         if (cur != org) {
2083                 dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
2084                 mask |= ETH_VLAN_EXTEND_MASK;
2085         }
2086
        /* no change */
2088         if (mask == 0)
2089                 return ret;
2090
2091         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
2092         (*dev->dev_ops->vlan_offload_set)(dev, mask);
2093
2094         return ret;
2095 }
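
/*
 * Usage sketch (illustrative only): enable VLAN stripping while
 * preserving the other offload bits, using the getter defined below.
 *
 *      int mask = rte_eth_dev_get_vlan_offload(port_id);
 *
 *      mask |= ETH_VLAN_STRIP_OFFLOAD;
 *      rte_eth_dev_set_vlan_offload(port_id, mask);
 */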
2096
2097 int
2098 rte_eth_dev_get_vlan_offload(uint8_t port_id)
2099 {
2100         struct rte_eth_dev *dev;
2101         int ret = 0;
2102
2103         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2104         dev = &rte_eth_devices[port_id];
2105
2106         if (dev->data->dev_conf.rxmode.hw_vlan_strip)
2107                 ret |= ETH_VLAN_STRIP_OFFLOAD;
2108
2109         if (dev->data->dev_conf.rxmode.hw_vlan_filter)
2110                 ret |= ETH_VLAN_FILTER_OFFLOAD;
2111
2112         if (dev->data->dev_conf.rxmode.hw_vlan_extend)
2113                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
2114
2115         return ret;
2116 }
2117
2118 int
2119 rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on)
2120 {
2121         struct rte_eth_dev *dev;
2122
2123         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2124         dev = &rte_eth_devices[port_id];
2125         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
2126         (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on);
2127
2128         return 0;
2129 }
2130
2131 int
2132 rte_eth_dev_flow_ctrl_get(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
2133 {
2134         struct rte_eth_dev *dev;
2135
2136         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2137         dev = &rte_eth_devices[port_id];
2138         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
2139         memset(fc_conf, 0, sizeof(*fc_conf));
2140         return (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf);
2141 }
2142
2143 int
2144 rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
2145 {
2146         struct rte_eth_dev *dev;
2147
2148         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2149         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
2150                 RTE_PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
2151                 return -EINVAL;
2152         }
2153
2154         dev = &rte_eth_devices[port_id];
2155         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
2156         return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
2157 }
2158
2159 int
2160 rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc_conf)
2161 {
2162         struct rte_eth_dev *dev;
2163
2164         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2165         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
2166                 RTE_PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
2167                 return -EINVAL;
2168         }
2169
2170         dev = &rte_eth_devices[port_id];
        /* High water and low water validation are device-specific */
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->priority_flow_ctrl_set,
                                -ENOTSUP);
        return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
2175 }
2176
2177 static int
2178 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
2179                         uint16_t reta_size)
2180 {
2181         uint16_t i, num;
2182
2183         if (!reta_conf)
2184                 return -EINVAL;
2185
2186         num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
2187         for (i = 0; i < num; i++) {
2188                 if (reta_conf[i].mask)
2189                         return 0;
2190         }
2191
2192         return -EINVAL;
2193 }
2194
2195 static int
2196 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
2197                          uint16_t reta_size,
2198                          uint16_t max_rxq)
2199 {
2200         uint16_t i, idx, shift;
2201
2202         if (!reta_conf)
2203                 return -EINVAL;
2204
2205         if (max_rxq == 0) {
2206                 RTE_PMD_DEBUG_TRACE("No receive queue is available\n");
2207                 return -EINVAL;
2208         }
2209
2210         for (i = 0; i < reta_size; i++) {
2211                 idx = i / RTE_RETA_GROUP_SIZE;
2212                 shift = i % RTE_RETA_GROUP_SIZE;
2213                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
2214                         (reta_conf[idx].reta[shift] >= max_rxq)) {
2215                         RTE_PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
2216                                 "the maximum rxq index: %u\n", idx, shift,
2217                                 reta_conf[idx].reta[shift], max_rxq);
2218                         return -EINVAL;
2219                 }
2220         }
2221
2222         return 0;
2223 }
2224
2225 int
2226 rte_eth_dev_rss_reta_update(uint8_t port_id,
2227                             struct rte_eth_rss_reta_entry64 *reta_conf,
2228                             uint16_t reta_size)
2229 {
2230         struct rte_eth_dev *dev;
2231         int ret;
2232
2233         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2234         /* Check mask bits */
2235         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2236         if (ret < 0)
2237                 return ret;
2238
2239         dev = &rte_eth_devices[port_id];
2240
2241         /* Check entry value */
2242         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
2243                                 dev->data->nb_rx_queues);
2244         if (ret < 0)
2245                 return ret;
2246
2247         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
2248         return (*dev->dev_ops->reta_update)(dev, reta_conf, reta_size);
2249 }
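
/*
 * Usage sketch (illustrative only): spread traffic round-robin over
 * nb_rxq queues, assuming reta_size (from dev_info.reta_size) is a
 * multiple of RTE_RETA_GROUP_SIZE.  Entry i lives in group
 * i / RTE_RETA_GROUP_SIZE at offset i % RTE_RETA_GROUP_SIZE, and its
 * mask bit must be set for the entry to be applied.
 *
 *      struct rte_eth_rss_reta_entry64 reta_conf[reta_size /
 *                                                RTE_RETA_GROUP_SIZE];
 *      uint16_t i;
 *
 *      memset(reta_conf, 0, sizeof(reta_conf));
 *      for (i = 0; i < reta_size; i++) {
 *              reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
 *                      1ULL << (i % RTE_RETA_GROUP_SIZE);
 *              reta_conf[i / RTE_RETA_GROUP_SIZE].reta[
 *                      i % RTE_RETA_GROUP_SIZE] = i % nb_rxq;
 *      }
 *      rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
 */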
2250
2251 int
2252 rte_eth_dev_rss_reta_query(uint8_t port_id,
2253                            struct rte_eth_rss_reta_entry64 *reta_conf,
2254                            uint16_t reta_size)
2255 {
2256         struct rte_eth_dev *dev;
2257         int ret;
2258
2259         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2260
2261         /* Check mask bits */
2262         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2263         if (ret < 0)
2264                 return ret;
2265
2266         dev = &rte_eth_devices[port_id];
2267         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
2268         return (*dev->dev_ops->reta_query)(dev, reta_conf, reta_size);
2269 }
2270
2271 int
2272 rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf)
2273 {
2274         struct rte_eth_dev *dev;
        uint64_t rss_hash_protos;
2276
2277         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2278         rss_hash_protos = rss_conf->rss_hf;
2279         if ((rss_hash_protos != 0) &&
2280             ((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) {
                RTE_PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%" PRIx64 "\n",
                                rss_hash_protos);
2283                 return -EINVAL;
2284         }
2285         dev = &rte_eth_devices[port_id];
2286         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
2287         return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
2288 }
2289
2290 int
2291 rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
2292                               struct rte_eth_rss_conf *rss_conf)
2293 {
2294         struct rte_eth_dev *dev;
2295
2296         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2297         dev = &rte_eth_devices[port_id];
2298         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
2299         return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
2300 }
2301
2302 int
2303 rte_eth_dev_udp_tunnel_port_add(uint8_t port_id,
2304                                 struct rte_eth_udp_tunnel *udp_tunnel)
2305 {
2306         struct rte_eth_dev *dev;
2307
2308         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2309         if (udp_tunnel == NULL) {
2310                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2311                 return -EINVAL;
2312         }
2313
2314         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2315                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
2316                 return -EINVAL;
2317         }
2318
2319         dev = &rte_eth_devices[port_id];
2320         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
2321         return (*dev->dev_ops->udp_tunnel_port_add)(dev, udp_tunnel);
2322 }
2323
2324 int
2325 rte_eth_dev_udp_tunnel_port_delete(uint8_t port_id,
2326                                    struct rte_eth_udp_tunnel *udp_tunnel)
2327 {
2328         struct rte_eth_dev *dev;
2329
2330         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2331         dev = &rte_eth_devices[port_id];
2332
2333         if (udp_tunnel == NULL) {
2334                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2335                 return -EINVAL;
2336         }
2337
2338         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2339                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
2340                 return -EINVAL;
2341         }
2342
2343         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
2344         return (*dev->dev_ops->udp_tunnel_port_del)(dev, udp_tunnel);
2345 }
2346
2347 int
2348 rte_eth_led_on(uint8_t port_id)
2349 {
2350         struct rte_eth_dev *dev;
2351
2352         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2353         dev = &rte_eth_devices[port_id];
2354         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
2355         return (*dev->dev_ops->dev_led_on)(dev);
2356 }
2357
2358 int
2359 rte_eth_led_off(uint8_t port_id)
2360 {
2361         struct rte_eth_dev *dev;
2362
2363         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2364         dev = &rte_eth_devices[port_id];
2365         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
2366         return (*dev->dev_ops->dev_led_off)(dev);
2367 }
2368
2369 /*
2370  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2371  * an empty spot.
2372  */
2373 static int
2374 get_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
2375 {
2376         struct rte_eth_dev_info dev_info;
2377         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2378         unsigned i;
2379
2380         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2381         rte_eth_dev_info_get(port_id, &dev_info);
2382
2383         for (i = 0; i < dev_info.max_mac_addrs; i++)
2384                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
2385                         return i;
2386
2387         return -1;
2388 }
2389
2390 static const struct ether_addr null_mac_addr;
2391
2392 int
2393 rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
2394                         uint32_t pool)
2395 {
2396         struct rte_eth_dev *dev;
2397         int index;
2398         uint64_t pool_mask;
2399         int ret;
2400
2401         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2402         dev = &rte_eth_devices[port_id];
2403         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
2404
2405         if (is_zero_ether_addr(addr)) {
2406                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2407                         port_id);
2408                 return -EINVAL;
2409         }
2410         if (pool >= ETH_64_POOLS) {
2411                 RTE_PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
2412                 return -EINVAL;
2413         }
2414
2415         index = get_mac_addr_index(port_id, addr);
2416         if (index < 0) {
2417                 index = get_mac_addr_index(port_id, &null_mac_addr);
2418                 if (index < 0) {
2419                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2420                                 port_id);
2421                         return -ENOSPC;
2422                 }
2423         } else {
2424                 pool_mask = dev->data->mac_pool_sel[index];
2425
                /* If both the MAC address and the pool are already set, do nothing */
2427                 if (pool_mask & (1ULL << pool))
2428                         return 0;
2429         }
2430
2431         /* Update NIC */
2432         ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
2433
2434         if (ret == 0) {
2435                 /* Update address in NIC data structure */
2436                 ether_addr_copy(addr, &dev->data->mac_addrs[index]);
2437
2438                 /* Update pool bitmap in NIC data structure */
2439                 dev->data->mac_pool_sel[index] |= (1ULL << pool);
2440         }
2441
2442         return ret;
2443 }
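
/*
 * Usage sketch (illustrative only): add a locally administered
 * secondary MAC address to pool 0.
 *
 *      struct ether_addr mac = {
 *              .addr_bytes = {0x02, 0x00, 0x00, 0x00, 0x00, 0x01} };
 *
 *      if (rte_eth_dev_mac_addr_add(port_id, &mac, 0) != 0)
 *              printf("could not add MAC on port %u\n", port_id);
 */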
2444
2445 int
2446 rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
2447 {
2448         struct rte_eth_dev *dev;
2449         int index;
2450
2451         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2452         dev = &rte_eth_devices[port_id];
2453         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
2454
2455         index = get_mac_addr_index(port_id, addr);
2456         if (index == 0) {
2457                 RTE_PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
2458                 return -EADDRINUSE;
2459         } else if (index < 0)
2460                 return 0;  /* Do nothing if address wasn't found */
2461
2462         /* Update NIC */
2463         (*dev->dev_ops->mac_addr_remove)(dev, index);
2464
2465         /* Update address in NIC data structure */
2466         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
2467
2468         /* reset pool bitmap */
2469         dev->data->mac_pool_sel[index] = 0;
2470
2471         return 0;
2472 }
2473
2474 int
2475 rte_eth_dev_default_mac_addr_set(uint8_t port_id, struct ether_addr *addr)
2476 {
2477         struct rte_eth_dev *dev;
2478
2479         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2480
2481         if (!is_valid_assigned_ether_addr(addr))
2482                 return -EINVAL;
2483
2484         dev = &rte_eth_devices[port_id];
2485         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
2486
2487         /* Update default address in NIC data structure */
2488         ether_addr_copy(addr, &dev->data->mac_addrs[0]);
2489
2490         (*dev->dev_ops->mac_addr_set)(dev, addr);
2491
2492         return 0;
2493 }
2494
2496 /*
 * Returns index into the unicast hash MAC address array of addr.
 * Use 00:00:00:00:00:00 to find an empty spot.
2499  */
2500 static int
2501 get_hash_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
2502 {
2503         struct rte_eth_dev_info dev_info;
2504         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2505         unsigned i;
2506
2507         rte_eth_dev_info_get(port_id, &dev_info);
2508         if (!dev->data->hash_mac_addrs)
2509                 return -1;
2510
2511         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
2512                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
2513                         ETHER_ADDR_LEN) == 0)
2514                         return i;
2515
2516         return -1;
2517 }
2518
2519 int
2520 rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
2521                                 uint8_t on)
2522 {
2523         int index;
2524         int ret;
2525         struct rte_eth_dev *dev;
2526
2527         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2528
2529         dev = &rte_eth_devices[port_id];
2530         if (is_zero_ether_addr(addr)) {
2531                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2532                         port_id);
2533                 return -EINVAL;
2534         }
2535
2536         index = get_hash_mac_addr_index(port_id, addr);
2537         /* Check if it's already there, and do nothing */
2538         if ((index >= 0) && (on))
2539                 return 0;
2540
2541         if (index < 0) {
2542                 if (!on) {
2543                         RTE_PMD_DEBUG_TRACE("port %d: the MAC address was not "
2544                                 "set in UTA\n", port_id);
2545                         return -EINVAL;
2546                 }
2547
2548                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
2549                 if (index < 0) {
2550                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2551                                         port_id);
2552                         return -ENOSPC;
2553                 }
2554         }
2555
2556         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
2557         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
2558         if (ret == 0) {
2559                 /* Update address in NIC data structure */
2560                 if (on)
2561                         ether_addr_copy(addr,
2562                                         &dev->data->hash_mac_addrs[index]);
2563                 else
2564                         ether_addr_copy(&null_mac_addr,
2565                                         &dev->data->hash_mac_addrs[index]);
2566         }
2567
2568         return ret;
2569 }
2570
2571 int
2572 rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
2573 {
2574         struct rte_eth_dev *dev;
2575
2576         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2577
2578         dev = &rte_eth_devices[port_id];
2579
2580         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
2581         return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
2582 }
2583
2584 int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
2585                                         uint16_t tx_rate)
2586 {
2587         struct rte_eth_dev *dev;
2588         struct rte_eth_dev_info dev_info;
2589         struct rte_eth_link link;
2590
2591         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2592
2593         dev = &rte_eth_devices[port_id];
2594         rte_eth_dev_info_get(port_id, &dev_info);
2595         link = dev->data->dev_link;
2596
        if (queue_idx >= dev_info.max_tx_queues) {
                RTE_PMD_DEBUG_TRACE("set queue rate limit: port %d: "
                                "invalid queue id=%d\n", port_id, queue_idx);
2600                 return -EINVAL;
2601         }
2602
2603         if (tx_rate > link.link_speed) {
                RTE_PMD_DEBUG_TRACE("set queue rate limit: invalid tx_rate=%d, "
                                "bigger than link speed %d\n",
                                tx_rate, link.link_speed);
2607                 return -EINVAL;
2608         }
2609
2610         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
2611         return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate);
2612 }
2613
2614 int
2615 rte_eth_mirror_rule_set(uint8_t port_id,
2616                         struct rte_eth_mirror_conf *mirror_conf,
2617                         uint8_t rule_id, uint8_t on)
2618 {
2619         struct rte_eth_dev *dev;
2620
2621         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2622         if (mirror_conf->rule_type == 0) {
2623                 RTE_PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
2624                 return -EINVAL;
2625         }
2626
2627         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
2628                 RTE_PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
2629                                 ETH_64_POOLS - 1);
2630                 return -EINVAL;
2631         }
2632
2633         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
2634              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
2635             (mirror_conf->pool_mask == 0)) {
2636                 RTE_PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not be 0.\n");
2637                 return -EINVAL;
2638         }
2639
2640         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
2641             mirror_conf->vlan.vlan_mask == 0) {
2642                 RTE_PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask can not be 0.\n");
2643                 return -EINVAL;
2644         }
2645
2646         dev = &rte_eth_devices[port_id];
2647         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
2648
2649         return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
2650 }
2651
2652 int
2653 rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
2654 {
2655         struct rte_eth_dev *dev;
2656
2657         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2658
2659         dev = &rte_eth_devices[port_id];
2660         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
2661
2662         return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
2663 }
2664
2665 int
2666 rte_eth_dev_callback_register(uint8_t port_id,
2667                         enum rte_eth_event_type event,
2668                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2669 {
2670         struct rte_eth_dev *dev;
2671         struct rte_eth_dev_callback *user_cb;
2672
2673         if (!cb_fn)
2674                 return -EINVAL;
2675
2676         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2677
2678         dev = &rte_eth_devices[port_id];
2679         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2680
2681         TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
2682                 if (user_cb->cb_fn == cb_fn &&
2683                         user_cb->cb_arg == cb_arg &&
2684                         user_cb->event == event) {
2685                         break;
2686                 }
2687         }
2688
2689         /* create a new callback. */
2690         if (user_cb == NULL) {
2691                 user_cb = rte_zmalloc("INTR_USER_CALLBACK",
2692                                         sizeof(struct rte_eth_dev_callback), 0);
2693                 if (user_cb != NULL) {
2694                         user_cb->cb_fn = cb_fn;
2695                         user_cb->cb_arg = cb_arg;
2696                         user_cb->event = event;
2697                         TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
2698                 }
2699         }
2700
2701         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2702         return (user_cb == NULL) ? -ENOMEM : 0;
2703 }
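
/*
 * Usage sketch (illustrative only): get notified of link state
 * changes.  The callback is typically invoked from the EAL interrupt
 * thread, so it should only do lightweight work.
 *
 *      static int
 *      on_link_change(uint8_t port, enum rte_eth_event_type event,
 *                     void *arg, void *ret_param)
 *      {
 *              RTE_SET_USED(arg);
 *              RTE_SET_USED(ret_param);
 *              printf("port %u: event %d\n", port, event);
 *              return 0;
 *      }
 *
 *      rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *                                    on_link_change, NULL);
 */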
2704
2705 int
2706 rte_eth_dev_callback_unregister(uint8_t port_id,
2707                         enum rte_eth_event_type event,
2708                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2709 {
2710         int ret;
2711         struct rte_eth_dev *dev;
2712         struct rte_eth_dev_callback *cb, *next;
2713
2714         if (!cb_fn)
2715                 return -EINVAL;
2716
2717         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2718
2719         dev = &rte_eth_devices[port_id];
2720         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2721
2722         ret = 0;
2723         for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {
2725                 next = TAILQ_NEXT(cb, next);
2726
2727                 if (cb->cb_fn != cb_fn || cb->event != event ||
2728                                 (cb->cb_arg != (void *)-1 &&
2729                                 cb->cb_arg != cb_arg))
2730                         continue;
2731
2732                 /*
2733                  * if this callback is not executing right now,
2734                  * then remove it.
2735                  */
2736                 if (cb->active == 0) {
2737                         TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
2738                         rte_free(cb);
2739                 } else {
2740                         ret = -EAGAIN;
2741                 }
2742         }
2743
2744         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2745         return ret;
2746 }
2747
2748 int
2749 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
2750         enum rte_eth_event_type event, void *cb_arg, void *ret_param)
2751 {
2752         struct rte_eth_dev_callback *cb_lst;
2753         struct rte_eth_dev_callback dev_cb;
2754         int rc = 0;
2755
2756         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2757         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
2758                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
2759                         continue;
2760                 dev_cb = *cb_lst;
2761                 cb_lst->active = 1;
2762                 if (cb_arg != NULL)
2763                         dev_cb.cb_arg = cb_arg;
2764                 if (ret_param != NULL)
2765                         dev_cb.ret_param = ret_param;
2766
2767                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2768                 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
2769                                 dev_cb.cb_arg, dev_cb.ret_param);
2770                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
2771                 cb_lst->active = 0;
2772         }
2773         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2774         return rc;
2775 }
2776
2777 int
2778 rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data)
2779 {
2780         uint32_t vec;
2781         struct rte_eth_dev *dev;
2782         struct rte_intr_handle *intr_handle;
2783         uint16_t qid;
2784         int rc;
2785
2786         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2787
2788         dev = &rte_eth_devices[port_id];
2789
2790         if (!dev->intr_handle) {
2791                 RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
2792                 return -ENOTSUP;
2793         }
2794
2795         intr_handle = dev->intr_handle;
2796         if (!intr_handle->intr_vec) {
2797                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
2798                 return -EPERM;
2799         }
2800
2801         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
2802                 vec = intr_handle->intr_vec[qid];
2803                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
2804                 if (rc && rc != -EEXIST) {
2805                         RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
2806                                         " op %d epfd %d vec %u\n",
2807                                         port_id, qid, op, epfd, vec);
2808                 }
2809         }
2810
2811         return 0;
2812 }
2813
2814 const struct rte_memzone *
2815 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
2816                          uint16_t queue_id, size_t size, unsigned align,
2817                          int socket_id)
2818 {
2819         char z_name[RTE_MEMZONE_NAMESIZE];
2820         const struct rte_memzone *mz;
2821
2822         snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
2823                  dev->device->driver->name, ring_name,
2824                  dev->data->port_id, queue_id);
2825
2826         mz = rte_memzone_lookup(z_name);
2827         if (mz)
2828                 return mz;
2829
2830         if (rte_xen_dom0_supported())
2831                 return rte_memzone_reserve_bounded(z_name, size, socket_id,
2832                                                    0, align, RTE_PGSIZE_2M);
2833         else
2834                 return rte_memzone_reserve_aligned(z_name, size, socket_id,
2835                                                    0, align);
2836 }
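
/*
 * Note: because of the lookup above, an existing memzone is reused
 * as-is; a later call with a larger size for the same ring name
 * returns the old, smaller zone.  Callers that resize rings must
 * account for this.
 */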
2837
2838 int
2839 rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id,
2840                           int epfd, int op, void *data)
2841 {
2842         uint32_t vec;
2843         struct rte_eth_dev *dev;
2844         struct rte_intr_handle *intr_handle;
2845         int rc;
2846
2847         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2848
2849         dev = &rte_eth_devices[port_id];
2850         if (queue_id >= dev->data->nb_rx_queues) {
2851                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
2852                 return -EINVAL;
2853         }
2854
2855         if (!dev->intr_handle) {
2856                 RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
2857                 return -ENOTSUP;
2858         }
2859
2860         intr_handle = dev->intr_handle;
2861         if (!intr_handle->intr_vec) {
2862                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
2863                 return -EPERM;
2864         }
2865
2866         vec = intr_handle->intr_vec[queue_id];
2867         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
2868         if (rc && rc != -EEXIST) {
2869                 RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
2870                                 " op %d epfd %d vec %u\n",
2871                                 port_id, queue_id, op, epfd, vec);
2872                 return rc;
2873         }
2874
2875         return 0;
2876 }
2877
2878 int
2879 rte_eth_dev_rx_intr_enable(uint8_t port_id,
2880                            uint16_t queue_id)
2881 {
2882         struct rte_eth_dev *dev;
2883
2884         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2885
2886         dev = &rte_eth_devices[port_id];
2887
2888         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
2889         return (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id);
2890 }
2891
2892 int
2893 rte_eth_dev_rx_intr_disable(uint8_t port_id,
2894                             uint16_t queue_id)
2895 {
2896         struct rte_eth_dev *dev;
2897
2898         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2899
2900         dev = &rte_eth_devices[port_id];
2901
2902         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
2903         return (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id);
2904 }
2905
2907 int
2908 rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type)
2909 {
2910         struct rte_eth_dev *dev;
2911
2912         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2913
2914         dev = &rte_eth_devices[port_id];
2915         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
2916         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
2917                                 RTE_ETH_FILTER_NOP, NULL);
2918 }
2919
2920 int
2921 rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
2922                        enum rte_filter_op filter_op, void *arg)
2923 {
2924         struct rte_eth_dev *dev;
2925
2926         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2927
2928         dev = &rte_eth_devices[port_id];
2929         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
2930         return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op, arg);
2931 }
2932
2933 void *
2934 rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
2935                 rte_rx_callback_fn fn, void *user_param)
2936 {
2937 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2938         rte_errno = ENOTSUP;
2939         return NULL;
2940 #endif
2941         /* check input parameters */
2942         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
2943                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
2944                 rte_errno = EINVAL;
2945                 return NULL;
2946         }
2947         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
2948
2949         if (cb == NULL) {
2950                 rte_errno = ENOMEM;
2951                 return NULL;
2952         }
2953
2954         cb->fn.rx = fn;
2955         cb->param = user_param;
2956
2957         rte_spinlock_lock(&rte_eth_rx_cb_lock);
2958         /* Add the callbacks in fifo order. */
2959         struct rte_eth_rxtx_callback *tail =
2960                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
2961
2962         if (!tail) {
2963                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
2964
2965         } else {
2966                 while (tail->next)
2967                         tail = tail->next;
2968                 tail->next = cb;
2969         }
2970         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
2971
2972         return cb;
2973 }
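
/*
 * Usage sketch (illustrative only): count packets received on queue 0
 * with a post-RX callback.  The callback runs in the context of
 * rte_eth_rx_burst() on the datapath lcore.
 *
 *      static uint16_t
 *      count_rx(uint8_t port, uint16_t queue, struct rte_mbuf *pkts[],
 *               uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
 *      {
 *              RTE_SET_USED(port);
 *              RTE_SET_USED(queue);
 *              RTE_SET_USED(pkts);
 *              RTE_SET_USED(max_pkts);
 *              *(uint64_t *)user_param += nb_pkts;
 *              return nb_pkts;
 *      }
 *
 *      static uint64_t rx_count;
 *      void *cb = rte_eth_add_rx_callback(port_id, 0, count_rx,
 *                                         &rx_count);
 */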
2974
2975 void *
2976 rte_eth_add_first_rx_callback(uint8_t port_id, uint16_t queue_id,
2977                 rte_rx_callback_fn fn, void *user_param)
2978 {
2979 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2980         rte_errno = ENOTSUP;
2981         return NULL;
2982 #endif
2983         /* check input parameters */
2984         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
2985                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
2986                 rte_errno = EINVAL;
2987                 return NULL;
2988         }
2989
2990         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
2991
2992         if (cb == NULL) {
2993                 rte_errno = ENOMEM;
2994                 return NULL;
2995         }
2996
2997         cb->fn.rx = fn;
2998         cb->param = user_param;
2999
3000         rte_spinlock_lock(&rte_eth_rx_cb_lock);
        /* Add the callback at the first position */
3002         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3003         rte_smp_wmb();
3004         rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3005         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3006
3007         return cb;
3008 }
3009
void *
rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
		rte_tx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	rte_errno = ENOTSUP;
	return NULL;
#endif
	/* check input parameters */
	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
		    queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
		rte_errno = EINVAL;
		return NULL;
	}

	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

	if (cb == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	cb->fn.tx = fn;
	cb->param = user_param;

	rte_spinlock_lock(&rte_eth_tx_cb_lock);
	/* Add the callbacks in FIFO order. */
	struct rte_eth_rxtx_callback *tail =
		rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];

	if (!tail) {
		rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
	} else {
		while (tail->next)
			tail = tail->next;
		tail->next = cb;
	}
	rte_spinlock_unlock(&rte_eth_tx_cb_lock);

	return cb;
}

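/*
 * Usage sketch (illustrative only): a pre-TX callback that counts packets
 * handed to the driver. count_tx_cb and tx_count are hypothetical names;
 * the signature follows rte_tx_callback_fn, which unlike the RX variant
 * has no max_pkts parameter.
 *
 *	static uint16_t
 *	count_tx_cb(uint8_t port, uint16_t queue, struct rte_mbuf *pkts[],
 *		    uint16_t nb_pkts, void *user_param)
 *	{
 *		uint64_t *counter = user_param;
 *
 *		*counter += nb_pkts;
 *		return nb_pkts;
 *	}
 *
 *	static uint64_t tx_count;
 *	void *cb = rte_eth_add_tx_callback(port_id, 0, count_tx_cb,
 *			&tx_count);
 */
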
int
rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
		struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	return -ENOTSUP;
#endif
	/* Check input parameters. */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
	if (user_cb == NULL ||
			queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
		return -EINVAL;

	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_eth_rxtx_callback *cb;
	struct rte_eth_rxtx_callback **prev_cb;
	int ret = -EINVAL;

	rte_spinlock_lock(&rte_eth_rx_cb_lock);
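	/*
	 * Walk the list through a pointer-to-pointer so that a matching
	 * element can be unlinked without special-casing the list head.
	 */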
	prev_cb = &dev->post_rx_burst_cbs[queue_id];
	for (; *prev_cb != NULL; prev_cb = &cb->next) {
		cb = *prev_cb;
		if (cb == user_cb) {
			/* Remove the user cb from the callback list. */
			*prev_cb = cb->next;
			ret = 0;
			break;
		}
	}
	rte_spinlock_unlock(&rte_eth_rx_cb_lock);

	return ret;
}

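/*
 * Removal semantics (RX shown; the TX variant below behaves the same):
 * a successful return only unlinks user_cb, it does not free it, and a
 * data-path thread may still be inside the callback at that moment. A
 * caller-side sketch, with quiesce_data_path() standing in for whatever
 * synchronization the application uses to wait out in-flight bursts:
 *
 *	if (rte_eth_remove_rx_callback(port_id, 0, cb) == 0) {
 *		quiesce_data_path();
 *		rte_free(cb);
 *	}
 */
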
int
rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
		struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	return -ENOTSUP;
#endif
	/* Check input parameters. */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
	if (user_cb == NULL ||
			queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
		return -EINVAL;

	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret = -EINVAL;
	struct rte_eth_rxtx_callback *cb;
	struct rte_eth_rxtx_callback **prev_cb;

	rte_spinlock_lock(&rte_eth_tx_cb_lock);
	prev_cb = &dev->pre_tx_burst_cbs[queue_id];
	for (; *prev_cb != NULL; prev_cb = &cb->next) {
		cb = *prev_cb;
		if (cb == user_cb) {
			/* Remove the user cb from the callback list. */
			*prev_cb = cb->next;
			ret = 0;
			break;
		}
	}
	rte_spinlock_unlock(&rte_eth_tx_cb_lock);

	return ret;
}

int
rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id,
	struct rte_eth_rxq_info *qinfo)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (qinfo == NULL)
		return -EINVAL;

	dev = &rte_eth_devices[port_id];
	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);

	memset(qinfo, 0, sizeof(*qinfo));
	dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
	return 0;
}

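/*
 * Usage sketch (illustrative only): querying the descriptor count and
 * mempool name of RX queue 0 via the fields of struct rte_eth_rxq_info.
 *
 *	struct rte_eth_rxq_info qinfo;
 *
 *	if (rte_eth_rx_queue_info_get(port_id, 0, &qinfo) == 0)
 *		printf("ring size %u, mempool %s\n", qinfo.nb_desc,
 *				qinfo.mp->name);
 */
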
int
rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id,
	struct rte_eth_txq_info *qinfo)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (qinfo == NULL)
		return -EINVAL;

	dev = &rte_eth_devices[port_id];
	if (queue_id >= dev->data->nb_tx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);

	memset(qinfo, 0, sizeof(*qinfo));
	dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
	return 0;
}

int
rte_eth_dev_set_mc_addr_list(uint8_t port_id,
			     struct ether_addr *mc_addr_set,
			     uint32_t nb_mc_addr)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
	return dev->dev_ops->set_mc_addr_list(dev, mc_addr_set, nb_mc_addr);
}

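/*
 * Usage sketch (illustrative only): installing a one-entry multicast
 * filter; 01:00:5e:00:00:01 is the all-hosts group MAC. Passing
 * nb_mc_addr == 0 clears the list, subject to PMD support.
 *
 *	struct ether_addr mc_list[] = {
 *		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } },
 *	};
 *
 *	int ret = rte_eth_dev_set_mc_addr_list(port_id, mc_list,
 *			RTE_DIM(mc_list));
 */
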
int
rte_eth_timesync_enable(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
	return (*dev->dev_ops->timesync_enable)(dev);
}

int
rte_eth_timesync_disable(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
	return (*dev->dev_ops->timesync_disable)(dev);
}

int
rte_eth_timesync_read_rx_timestamp(uint8_t port_id, struct timespec *timestamp,
				   uint32_t flags)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
	return (*dev->dev_ops->timesync_read_rx_timestamp)(dev, timestamp, flags);
}

int
rte_eth_timesync_read_tx_timestamp(uint8_t port_id, struct timespec *timestamp)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
	return (*dev->dev_ops->timesync_read_tx_timestamp)(dev, timestamp);
}

int
rte_eth_timesync_adjust_time(uint8_t port_id, int64_t delta)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
	return (*dev->dev_ops->timesync_adjust_time)(dev, delta);
}

int
rte_eth_timesync_read_time(uint8_t port_id, struct timespec *timestamp)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
	return (*dev->dev_ops->timesync_read_time)(dev, timestamp);
}

int
rte_eth_timesync_write_time(uint8_t port_id, const struct timespec *timestamp)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
	return (*dev->dev_ops->timesync_write_time)(dev, timestamp);
}

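/*
 * Usage sketch (illustrative only): the shell of an IEEE 1588 adjustment
 * loop built on the timesync ops above. compute_offset_ns() is
 * hypothetical; deriving the offset from the RX/TX timestamps is the
 * application's job.
 *
 *	struct timespec ts;
 *
 *	if (rte_eth_timesync_enable(port_id) == 0 &&
 *	    rte_eth_timesync_read_time(port_id, &ts) == 0) {
 *		int64_t delta = compute_offset_ns(&ts);
 *		rte_eth_timesync_adjust_time(port_id, delta);
 *	}
 */
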
int
rte_eth_dev_get_reg_info(uint8_t port_id, struct rte_dev_reg_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
	return (*dev->dev_ops->get_reg)(dev, info);
}

int
rte_eth_dev_get_eeprom_length(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
	return (*dev->dev_ops->get_eeprom_length)(dev);
}

int
rte_eth_dev_get_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
	return (*dev->dev_ops->get_eeprom)(dev, info);
}

int
rte_eth_dev_set_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
	return (*dev->dev_ops->set_eeprom)(dev, info);
}

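/*
 * Usage sketch (illustrative only): reading the whole EEPROM. The
 * length/offset/data fields come from struct rte_dev_eeprom_info; the
 * malloc()/free() buffer handling is an assumption for the example, and
 * dump_eeprom() is hypothetical.
 *
 *	int len = rte_eth_dev_get_eeprom_length(port_id);
 *
 *	if (len > 0) {
 *		struct rte_dev_eeprom_info ee = {
 *			.data = malloc(len),
 *			.offset = 0,
 *			.length = (uint32_t)len,
 *		};
 *
 *		if (ee.data != NULL &&
 *		    rte_eth_dev_get_eeprom(port_id, &ee) == 0)
 *			dump_eeprom(ee.data, ee.length);
 *		free(ee.data);
 *	}
 */
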
int
rte_eth_dev_get_dcb_info(uint8_t port_id,
			     struct rte_eth_dcb_info *dcb_info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (dcb_info == NULL)
		return -EINVAL;

	dev = &rte_eth_devices[port_id];
	memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
	return (*dev->dev_ops->get_dcb_info)(dev, dcb_info);
}

int
rte_eth_dev_l2_tunnel_eth_type_conf(uint8_t port_id,
				    struct rte_eth_l2_tunnel_conf *l2_tunnel)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (l2_tunnel == NULL) {
		RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
		return -EINVAL;
	}

	if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
		RTE_PMD_DEBUG_TRACE("Invalid tunnel type.\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
				-ENOTSUP);
	return (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev, l2_tunnel);
}

int
rte_eth_dev_l2_tunnel_offload_set(uint8_t port_id,
				  struct rte_eth_l2_tunnel_conf *l2_tunnel,
				  uint32_t mask,
				  uint8_t en)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (l2_tunnel == NULL) {
		RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
		return -EINVAL;
	}

	if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
		RTE_PMD_DEBUG_TRACE("Invalid tunnel type.\n");
		return -EINVAL;
	}

	if (mask == 0) {
		RTE_PMD_DEBUG_TRACE("Mask should have a non-zero value.\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
				-ENOTSUP);
	return (*dev->dev_ops->l2_tunnel_offload_set)(dev, l2_tunnel, mask, en);
}

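/*
 * Clamp *nb_desc to the limits the PMD reports: align up first, then
 * cap at nb_max, and finally raise to nb_min (applied last, so an
 * undersized request can never slip through).
 */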
static void
rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc,
			   const struct rte_eth_desc_lim *desc_lim)
{
	if (desc_lim->nb_align != 0)
		*nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);

	if (desc_lim->nb_max != 0)
		*nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);

	*nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
}

int
rte_eth_dev_adjust_nb_rx_tx_desc(uint8_t port_id,
				 uint16_t *nb_rx_desc,
				 uint16_t *nb_tx_desc)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);

	rte_eth_dev_info_get(port_id, &dev_info);

	if (nb_rx_desc != NULL)
		rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);

	if (nb_tx_desc != NULL)
		rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);

	return 0;
}
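
/*
 * Usage sketch (illustrative only): clamping requested ring sizes to the
 * PMD's limits before queue setup, as applications typically do; mb_pool
 * is a hypothetical mbuf pool created earlier by the caller.
 *
 *	uint16_t nb_rxd = 1024, nb_txd = 1024;
 *
 *	if (rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd) == 0) {
 *		rte_eth_rx_queue_setup(port_id, 0, nb_rxd,
 *				rte_eth_dev_socket_id(port_id), NULL, mb_pool);
 *		rte_eth_tx_queue_setup(port_id, 0, nb_txd,
 *				rte_eth_dev_socket_id(port_id), NULL);
 *	}
 */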