ethdev: fix missing imissed counter in xstats
[dpdk.git] lib/librte_ether/rte_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4
5 #include <sys/types.h>
6 #include <sys/queue.h>
7 #include <ctype.h>
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <string.h>
11 #include <stdarg.h>
12 #include <errno.h>
13 #include <stdint.h>
14 #include <inttypes.h>
15 #include <netinet/in.h>
16
17 #include <rte_byteorder.h>
18 #include <rte_log.h>
19 #include <rte_debug.h>
20 #include <rte_interrupts.h>
21 #include <rte_memory.h>
22 #include <rte_memcpy.h>
23 #include <rte_memzone.h>
24 #include <rte_launch.h>
25 #include <rte_eal.h>
26 #include <rte_per_lcore.h>
27 #include <rte_lcore.h>
28 #include <rte_atomic.h>
29 #include <rte_branch_prediction.h>
30 #include <rte_common.h>
31 #include <rte_mempool.h>
32 #include <rte_malloc.h>
33 #include <rte_mbuf.h>
34 #include <rte_errno.h>
35 #include <rte_spinlock.h>
36 #include <rte_string_fns.h>
37
38 #include "rte_ether.h"
39 #include "rte_ethdev.h"
40 #include "ethdev_profile.h"
41
42 static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
43 struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
44 static struct rte_eth_dev_data *rte_eth_dev_data;
45 static uint8_t eth_dev_last_created_port;
46
47 /* spinlock for eth device callbacks */
48 static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
49
50 /* spinlock for add/remove rx callbacks */
51 static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;
52
53 /* spinlock for add/remove tx callbacks */
54 static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;
55
56 /* store statistics names and their offsets in the stats structure */
57 struct rte_eth_xstats_name_off {
58         char name[RTE_ETH_XSTATS_NAME_SIZE];
59         unsigned offset;
60 };
61
62 static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
63         {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
64         {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
65         {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
66         {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
67         {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
68         {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
69         {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
70         {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
71                 rx_nombuf)},
72 };
73
74 #define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))
75
76 static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
77         {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
78         {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
79         {"errors", offsetof(struct rte_eth_stats, q_errors)},
80 };
81
82 #define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
83                 sizeof(rte_rxq_stats_strings[0]))
84
85 static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
86         {"packets", offsetof(struct rte_eth_stats, q_opackets)},
87         {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
88 };
89 #define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
90                 sizeof(rte_txq_stats_strings[0]))
91
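/*
 * Illustrative sketch of how the name/offset tables above are consumed:
 * each basic counter (e.g. imissed as "rx_missed_errors") is read from
 * struct rte_eth_stats at the recorded offset.  Minimal example, assuming
 * "port_id" refers to a valid, started port:
 *
 *	struct rte_eth_stats stats;
 *	unsigned int i;
 *
 *	rte_eth_stats_get(port_id, &stats);
 *	for (i = 0; i < RTE_NB_STATS; i++) {
 *		uint64_t v = *(uint64_t *)(((char *)&stats) +
 *				rte_stats_strings[i].offset);
 *		printf("%s: %" PRIu64 "\n", rte_stats_strings[i].name, v);
 *	}
 */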
92
93 /**
94  * The user application callback description.
95  *
96  * It contains the callback address to be registered by the user
97  * application, the pointer to the callback parameters, and the event type.
98  */
99 struct rte_eth_dev_callback {
100         TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
101         rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
102         void *cb_arg;                           /**< Parameter for callback */
103         void *ret_param;                        /**< Return parameter */
104         enum rte_eth_event_type event;          /**< Interrupt event type */
105         uint32_t active;                        /**< Callback is executing */
106 };
107
108 enum {
109         STAT_QMAP_TX = 0,
110         STAT_QMAP_RX
111 };
112
113 uint16_t
114 rte_eth_find_next(uint16_t port_id)
115 {
116         while (port_id < RTE_MAX_ETHPORTS &&
117                rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED)
118                 port_id++;
119
120         if (port_id >= RTE_MAX_ETHPORTS)
121                 return RTE_MAX_ETHPORTS;
122
123         return port_id;
124 }
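
/*
 * Usage sketch: rte_eth_find_next() is the building block behind the
 * RTE_ETH_FOREACH_DEV() iteration macro.  Minimal example, assuming EAL
 * has been initialized:
 *
 *	uint16_t port_id;
 *
 *	RTE_ETH_FOREACH_DEV(port_id)
 *		printf("port %u is attached\n", port_id);
 */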
125
126 static void
127 rte_eth_dev_data_alloc(void)
128 {
129         const unsigned flags = 0;
130         const struct rte_memzone *mz;
131
132         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
133                 mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
134                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
135                                 rte_socket_id(), flags);
136         } else
137                 mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
138         if (mz == NULL)
139                 rte_panic("Cannot allocate memzone for ethernet port data\n");
140
141         rte_eth_dev_data = mz->addr;
142         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
143                 memset(rte_eth_dev_data, 0,
144                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
145 }
146
147 struct rte_eth_dev *
148 rte_eth_dev_allocated(const char *name)
149 {
150         unsigned i;
151
152         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
153                 if ((rte_eth_devices[i].state == RTE_ETH_DEV_ATTACHED) &&
154                     strcmp(rte_eth_devices[i].data->name, name) == 0)
155                         return &rte_eth_devices[i];
156         }
157         return NULL;
158 }
159
160 static uint16_t
161 rte_eth_dev_find_free_port(void)
162 {
163         unsigned i;
164
165         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
166                 if (rte_eth_devices[i].state == RTE_ETH_DEV_UNUSED)
167                         return i;
168         }
169         return RTE_MAX_ETHPORTS;
170 }
171
172 static struct rte_eth_dev *
173 eth_dev_get(uint16_t port_id)
174 {
175         struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
176
177         eth_dev->data = &rte_eth_dev_data[port_id];
178         eth_dev->state = RTE_ETH_DEV_ATTACHED;
179         TAILQ_INIT(&(eth_dev->link_intr_cbs));
180
181         eth_dev_last_created_port = port_id;
182
183         return eth_dev;
184 }
185
186 struct rte_eth_dev *
187 rte_eth_dev_allocate(const char *name)
188 {
189         uint16_t port_id;
190         struct rte_eth_dev *eth_dev;
191
192         port_id = rte_eth_dev_find_free_port();
193         if (port_id == RTE_MAX_ETHPORTS) {
194                 RTE_PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
195                 return NULL;
196         }
197
198         if (rte_eth_dev_data == NULL)
199                 rte_eth_dev_data_alloc();
200
201         if (rte_eth_dev_allocated(name) != NULL) {
202                 RTE_PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n",
203                                 name);
204                 return NULL;
205         }
206
207         memset(&rte_eth_dev_data[port_id], 0, sizeof(struct rte_eth_dev_data));
208         eth_dev = eth_dev_get(port_id);
209         snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
210         eth_dev->data->port_id = port_id;
211         eth_dev->data->mtu = ETHER_MTU;
212
213         return eth_dev;
214 }
215
216 /*
217  * Attach to a port already registered by the primary process, which
218  * ensures that the same device gets the same port id in both the
219  * primary and the secondary process.
220  */
221 struct rte_eth_dev *
222 rte_eth_dev_attach_secondary(const char *name)
223 {
224         uint16_t i;
225         struct rte_eth_dev *eth_dev;
226
227         if (rte_eth_dev_data == NULL)
228                 rte_eth_dev_data_alloc();
229
230         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
231                 if (strcmp(rte_eth_dev_data[i].name, name) == 0)
232                         break;
233         }
234         if (i == RTE_MAX_ETHPORTS) {
235                 RTE_PMD_DEBUG_TRACE(
236                         "device %s is not driven by the primary process\n",
237                         name);
238                 return NULL;
239         }
240
241         eth_dev = eth_dev_get(i);
242         RTE_ASSERT(eth_dev->data->port_id == i);
243
244         return eth_dev;
245 }
246
247 int
248 rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
249 {
250         if (eth_dev == NULL)
251                 return -EINVAL;
252
253         eth_dev->state = RTE_ETH_DEV_UNUSED;
254         return 0;
255 }
256
257 int
258 rte_eth_dev_is_valid_port(uint16_t port_id)
259 {
260         if (port_id >= RTE_MAX_ETHPORTS ||
261             (rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
262              rte_eth_devices[port_id].state != RTE_ETH_DEV_DEFERRED))
263                 return 0;
264         else
265                 return 1;
266 }
267
268 int
269 rte_eth_dev_socket_id(uint16_t port_id)
270 {
271         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
272         return rte_eth_devices[port_id].data->numa_node;
273 }
274
275 void *
276 rte_eth_dev_get_sec_ctx(uint8_t port_id)
277 {
278         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
279         return rte_eth_devices[port_id].security_ctx;
280 }
281
282 uint16_t
283 rte_eth_dev_count(void)
284 {
285         uint16_t p;
286         uint16_t count;
287
288         count = 0;
289
290         RTE_ETH_FOREACH_DEV(p)
291                 count++;
292
293         return count;
294 }
295
296 int
297 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
298 {
299         char *tmp;
300
301         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
302
303         if (name == NULL) {
304                 RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
305                 return -EINVAL;
306         }
307
308         /* do not check 'rte_eth_devices[i].data' here,
309          * because it might be overwritten by a VDEV PMD */
310         tmp = rte_eth_dev_data[port_id].name;
311         strcpy(name, tmp);
312         return 0;
313 }
314
315 int
316 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
317 {
318         int i;
319
320         if (name == NULL) {
321                 RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
322                 return -EINVAL;
323         }
324
325         RTE_ETH_FOREACH_DEV(i) {
326                 if (!strncmp(name,
327                         rte_eth_dev_data[i].name, strlen(name))) {
328
329                         *port_id = i;
330
331                         return 0;
332                 }
333         }
334         return -ENODEV;
335 }
336
337 /* attach the new device, then store the port_id of the device */
338 int
339 rte_eth_dev_attach(const char *devargs, uint16_t *port_id)
340 {
341         int ret = -1;
342         int current = rte_eth_dev_count();
343         char *name = NULL;
344         char *args = NULL;
345
346         if ((devargs == NULL) || (port_id == NULL)) {
347                 ret = -EINVAL;
348                 goto err;
349         }
350
351         /* parse devargs, then retrieve device name and args */
352         if (rte_eal_parse_devargs_str(devargs, &name, &args))
353                 goto err;
354
355         ret = rte_eal_dev_attach(name, args);
356         if (ret < 0)
357                 goto err;
358
359         /* no point looking at the port count if no port exists */
360         if (!rte_eth_dev_count()) {
361                 RTE_LOG(ERR, EAL, "No port found for device (%s)\n", name);
362                 ret = -1;
363                 goto err;
364         }
365
366         /* if nothing happened, there is a bug here, since some driver told us
367          * it did attach a device, but did not create a port.
368          */
369         if (current == rte_eth_dev_count()) {
370                 ret = -1;
371                 goto err;
372         }
373
374         *port_id = eth_dev_last_created_port;
375         ret = 0;
376
377 err:
378         free(name);
379         free(args);
380         return ret;
381 }
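
/*
 * Hot-plug usage sketch for the attach/detach pair.  The devargs string
 * below ("net_null0") is only an example; any bus/driver specific devargs
 * may be passed:
 *
 *	uint16_t port_id;
 *
 *	if (rte_eth_dev_attach("net_null0", &port_id) == 0)
 *		printf("attached as port %u\n", port_id);
 */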
382
383 /* detach the device, then store the name of the device */
384 int
385 rte_eth_dev_detach(uint16_t port_id, char *name)
386 {
387         uint32_t dev_flags;
388         int ret = -1;
389
390         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
391
392         if (name == NULL) {
393                 ret = -EINVAL;
394                 goto err;
395         }
396
397         dev_flags = rte_eth_devices[port_id].data->dev_flags;
398         if (dev_flags & RTE_ETH_DEV_BONDED_SLAVE) {
399                 RTE_LOG(ERR, EAL, "Port %" PRIu16 " is bonded, cannot detach\n",
400                         port_id);
401                 ret = -ENOTSUP;
402                 goto err;
403         }
404
405         snprintf(name, sizeof(rte_eth_devices[port_id].data->name),
406                  "%s", rte_eth_devices[port_id].data->name);
407
408         ret = rte_eal_dev_detach(rte_eth_devices[port_id].device);
409         if (ret < 0)
410                 goto err;
411
412         rte_eth_devices[port_id].state = RTE_ETH_DEV_UNUSED;
413         return 0;
414
415 err:
416         return ret;
417 }
418
419 static int
420 rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
421 {
422         uint16_t old_nb_queues = dev->data->nb_rx_queues;
423         void **rxq;
424         unsigned i;
425
426         if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
427                 dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
428                                 sizeof(dev->data->rx_queues[0]) * nb_queues,
429                                 RTE_CACHE_LINE_SIZE);
430                 if (dev->data->rx_queues == NULL) {
431                         dev->data->nb_rx_queues = 0;
432                         return -(ENOMEM);
433                 }
434         } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
435                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
436
437                 rxq = dev->data->rx_queues;
438
439                 for (i = nb_queues; i < old_nb_queues; i++)
440                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
441                 rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
442                                 RTE_CACHE_LINE_SIZE);
443                 if (rxq == NULL)
444                         return -(ENOMEM);
445                 if (nb_queues > old_nb_queues) {
446                         uint16_t new_qs = nb_queues - old_nb_queues;
447
448                         memset(rxq + old_nb_queues, 0,
449                                 sizeof(rxq[0]) * new_qs);
450                 }
451
452                 dev->data->rx_queues = rxq;
453
454         } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
455                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
456
457                 rxq = dev->data->rx_queues;
458
459                 for (i = nb_queues; i < old_nb_queues; i++)
460                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
461
462                 rte_free(dev->data->rx_queues);
463                 dev->data->rx_queues = NULL;
464         }
465         dev->data->nb_rx_queues = nb_queues;
466         return 0;
467 }
468
469 int
470 rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
471 {
472         struct rte_eth_dev *dev;
473
474         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
475
476         dev = &rte_eth_devices[port_id];
477         if (rx_queue_id >= dev->data->nb_rx_queues) {
478                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
479                 return -EINVAL;
480         }
481
482         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
483
484         if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
485                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
486                         " already started\n",
487                         rx_queue_id, port_id);
488                 return 0;
489         }
490
491         return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
492
493 }
494
495 int
496 rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
497 {
498         struct rte_eth_dev *dev;
499
500         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
501
502         dev = &rte_eth_devices[port_id];
503         if (rx_queue_id >= dev->data->nb_rx_queues) {
504                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
505                 return -EINVAL;
506         }
507
508         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
509
510         if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
511                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
512                         " already stopped\n",
513                         rx_queue_id, port_id);
514                 return 0;
515         }
516
517         return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
518
519 }
520
521 int
522 rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
523 {
524         struct rte_eth_dev *dev;
525
526         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
527
528         dev = &rte_eth_devices[port_id];
529         if (tx_queue_id >= dev->data->nb_tx_queues) {
530                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
531                 return -EINVAL;
532         }
533
534         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
535
536         if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
537                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
538                         " already started\n",
539                         tx_queue_id, port_id);
540                 return 0;
541         }
542
543         return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
544
545 }
546
547 int
548 rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
549 {
550         struct rte_eth_dev *dev;
551
552         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
553
554         dev = &rte_eth_devices[port_id];
555         if (tx_queue_id >= dev->data->nb_tx_queues) {
556                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
557                 return -EINVAL;
558         }
559
560         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
561
562         if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
563                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
564                         " already stopped\n",
565                         tx_queue_id, port_id);
566                 return 0;
567         }
568
569         return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
570
571 }
572
573 static int
574 rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
575 {
576         uint16_t old_nb_queues = dev->data->nb_tx_queues;
577         void **txq;
578         unsigned i;
579
580         if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
581                 dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
582                                                    sizeof(dev->data->tx_queues[0]) * nb_queues,
583                                                    RTE_CACHE_LINE_SIZE);
584                 if (dev->data->tx_queues == NULL) {
585                         dev->data->nb_tx_queues = 0;
586                         return -(ENOMEM);
587                 }
588         } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
589                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
590
591                 txq = dev->data->tx_queues;
592
593                 for (i = nb_queues; i < old_nb_queues; i++)
594                         (*dev->dev_ops->tx_queue_release)(txq[i]);
595                 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
596                                   RTE_CACHE_LINE_SIZE);
597                 if (txq == NULL)
598                         return -ENOMEM;
599                 if (nb_queues > old_nb_queues) {
600                         uint16_t new_qs = nb_queues - old_nb_queues;
601
602                         memset(txq + old_nb_queues, 0,
603                                sizeof(txq[0]) * new_qs);
604                 }
605
606                 dev->data->tx_queues = txq;
607
608         } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
609                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
610
611                 txq = dev->data->tx_queues;
612
613                 for (i = nb_queues; i < old_nb_queues; i++)
614                         (*dev->dev_ops->tx_queue_release)(txq[i]);
615
616                 rte_free(dev->data->tx_queues);
617                 dev->data->tx_queues = NULL;
618         }
619         dev->data->nb_tx_queues = nb_queues;
620         return 0;
621 }
622
623 uint32_t
624 rte_eth_speed_bitflag(uint32_t speed, int duplex)
625 {
626         switch (speed) {
627         case ETH_SPEED_NUM_10M:
628                 return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
629         case ETH_SPEED_NUM_100M:
630                 return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
631         case ETH_SPEED_NUM_1G:
632                 return ETH_LINK_SPEED_1G;
633         case ETH_SPEED_NUM_2_5G:
634                 return ETH_LINK_SPEED_2_5G;
635         case ETH_SPEED_NUM_5G:
636                 return ETH_LINK_SPEED_5G;
637         case ETH_SPEED_NUM_10G:
638                 return ETH_LINK_SPEED_10G;
639         case ETH_SPEED_NUM_20G:
640                 return ETH_LINK_SPEED_20G;
641         case ETH_SPEED_NUM_25G:
642                 return ETH_LINK_SPEED_25G;
643         case ETH_SPEED_NUM_40G:
644                 return ETH_LINK_SPEED_40G;
645         case ETH_SPEED_NUM_50G:
646                 return ETH_LINK_SPEED_50G;
647         case ETH_SPEED_NUM_56G:
648                 return ETH_LINK_SPEED_56G;
649         case ETH_SPEED_NUM_100G:
650                 return ETH_LINK_SPEED_100G;
651         default:
652                 return 0;
653         }
654 }
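
/*
 * Usage sketch: build the link_speeds bitmap of struct rte_eth_conf from
 * numeric speeds, e.g. to restrict the link to 1G and 10G full duplex
 * ("conf" is assumed to be the rte_eth_conf later passed to configure):
 *
 *	conf.link_speeds = rte_eth_speed_bitflag(ETH_SPEED_NUM_1G,
 *						 ETH_LINK_FULL_DUPLEX) |
 *			   rte_eth_speed_bitflag(ETH_SPEED_NUM_10G,
 *						 ETH_LINK_FULL_DUPLEX);
 */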
655
656 /**
657  * Convert the legacy rxmode bit-field configuration into Rx offload flags.
658  */
659 static void
660 rte_eth_convert_rx_offload_bitfield(const struct rte_eth_rxmode *rxmode,
661                                     uint64_t *rx_offloads)
662 {
663         uint64_t offloads = 0;
664
665         if (rxmode->header_split == 1)
666                 offloads |= DEV_RX_OFFLOAD_HEADER_SPLIT;
667         if (rxmode->hw_ip_checksum == 1)
668                 offloads |= DEV_RX_OFFLOAD_CHECKSUM;
669         if (rxmode->hw_vlan_filter == 1)
670                 offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
671         if (rxmode->hw_vlan_strip == 1)
672                 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
673         if (rxmode->hw_vlan_extend == 1)
674                 offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
675         if (rxmode->jumbo_frame == 1)
676                 offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
677         if (rxmode->hw_strip_crc == 1)
678                 offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
679         if (rxmode->enable_scatter == 1)
680                 offloads |= DEV_RX_OFFLOAD_SCATTER;
681         if (rxmode->enable_lro == 1)
682                 offloads |= DEV_RX_OFFLOAD_TCP_LRO;
683         if (rxmode->hw_timestamp == 1)
684                 offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
685         if (rxmode->security == 1)
686                 offloads |= DEV_RX_OFFLOAD_SECURITY;
687
688         *rx_offloads = offloads;
689 }
690
691 /**
692  * Convert Rx offload flags back into the legacy rxmode bit-field configuration.
693  */
694 static void
695 rte_eth_convert_rx_offloads(const uint64_t rx_offloads,
696                             struct rte_eth_rxmode *rxmode)
697 {
698
699         if (rx_offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
700                 rxmode->header_split = 1;
701         else
702                 rxmode->header_split = 0;
703         if (rx_offloads & DEV_RX_OFFLOAD_CHECKSUM)
704                 rxmode->hw_ip_checksum = 1;
705         else
706                 rxmode->hw_ip_checksum = 0;
707         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
708                 rxmode->hw_vlan_filter = 1;
709         else
710                 rxmode->hw_vlan_filter = 0;
711         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
712                 rxmode->hw_vlan_strip = 1;
713         else
714                 rxmode->hw_vlan_strip = 0;
715         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
716                 rxmode->hw_vlan_extend = 1;
717         else
718                 rxmode->hw_vlan_extend = 0;
719         if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
720                 rxmode->jumbo_frame = 1;
721         else
722                 rxmode->jumbo_frame = 0;
723         if (rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP)
724                 rxmode->hw_strip_crc = 1;
725         else
726                 rxmode->hw_strip_crc = 0;
727         if (rx_offloads & DEV_RX_OFFLOAD_SCATTER)
728                 rxmode->enable_scatter = 1;
729         else
730                 rxmode->enable_scatter = 0;
731         if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
732                 rxmode->enable_lro = 1;
733         else
734                 rxmode->enable_lro = 0;
735         if (rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
736                 rxmode->hw_timestamp = 1;
737         else
738                 rxmode->hw_timestamp = 0;
739         if (rx_offloads & DEV_RX_OFFLOAD_SECURITY)
740                 rxmode->security = 1;
741         else
742                 rxmode->security = 0;
743 }
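
/*
 * The two converters above let an application use either Rx configuration
 * style during the transition period.  Hypothetical equivalence sketch,
 * requesting hardware IP checksum and VLAN stripping ("conf" is the
 * application's rte_eth_conf):
 *
 *	Legacy bit-field style:
 *		conf.rxmode.hw_ip_checksum = 1;
 *		conf.rxmode.hw_vlan_strip = 1;
 *
 *	Equivalent offloads style:
 *		conf.rxmode.ignore_offload_bitfield = 1;
 *		conf.rxmode.offloads = DEV_RX_OFFLOAD_CHECKSUM |
 *				       DEV_RX_OFFLOAD_VLAN_STRIP;
 */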
744
745 int
746 rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
747                       const struct rte_eth_conf *dev_conf)
748 {
749         struct rte_eth_dev *dev;
750         struct rte_eth_dev_info dev_info;
751         struct rte_eth_conf local_conf = *dev_conf;
752         int diag;
753
754         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
755
756         if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
757                 RTE_PMD_DEBUG_TRACE(
758                         "Number of RX queues requested (%u) is greater than the max supported (%d)\n",
759                         nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
760                 return -EINVAL;
761         }
762
763         if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
764                 RTE_PMD_DEBUG_TRACE(
765                         "Number of TX queues requested (%u) is greater than the max supported (%d)\n",
766                         nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
767                 return -EINVAL;
768         }
769
770         dev = &rte_eth_devices[port_id];
771
772         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
773         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
774
775         if (dev->data->dev_started) {
776                 RTE_PMD_DEBUG_TRACE(
777                     "port %d must be stopped to allow configuration\n", port_id);
778                 return -EBUSY;
779         }
780
781         /*
782          * Convert between the offloads API to enable PMDs to support
783          * only one of them.
784          */
785         if (dev_conf->rxmode.ignore_offload_bitfield == 0) {
786                 rte_eth_convert_rx_offload_bitfield(
787                                 &dev_conf->rxmode, &local_conf.rxmode.offloads);
788         } else {
789                 rte_eth_convert_rx_offloads(dev_conf->rxmode.offloads,
790                                             &local_conf.rxmode);
791         }
792
793         /* Copy the dev_conf parameter into the dev structure */
794         memcpy(&dev->data->dev_conf, &local_conf, sizeof(dev->data->dev_conf));
795
796         /*
797          * Check that the numbers of RX and TX queues are not greater
798          * than the maximum number of RX and TX queues supported by the
799          * configured device.
800          */
801         (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
802
803         if (nb_rx_q == 0 && nb_tx_q == 0) {
804                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d: number of RX and TX queues cannot both be 0\n", port_id);
805                 return -EINVAL;
806         }
807
808         if (nb_rx_q > dev_info.max_rx_queues) {
809                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
810                                 port_id, nb_rx_q, dev_info.max_rx_queues);
811                 return -EINVAL;
812         }
813
814         if (nb_tx_q > dev_info.max_tx_queues) {
815                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
816                                 port_id, nb_tx_q, dev_info.max_tx_queues);
817                 return -EINVAL;
818         }
819
820         /* Check that the device supports requested interrupts */
821         if ((dev_conf->intr_conf.lsc == 1) &&
822                 (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
823                         RTE_PMD_DEBUG_TRACE("driver %s does not support lsc\n",
824                                         dev->device->driver->name);
825                         return -EINVAL;
826         }
827         if ((dev_conf->intr_conf.rmv == 1) &&
828             (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
829                 RTE_PMD_DEBUG_TRACE("driver %s does not support rmv\n",
830                                     dev->device->driver->name);
831                 return -EINVAL;
832         }
833
834         /*
835          * If jumbo frames are enabled, check that the maximum RX packet
836          * length is supported by the configured device.
837          */
838         if (local_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
839                 if (dev_conf->rxmode.max_rx_pkt_len >
840                     dev_info.max_rx_pktlen) {
841                         RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
842                                 " > max valid value %u\n",
843                                 port_id,
844                                 (unsigned)dev_conf->rxmode.max_rx_pkt_len,
845                                 (unsigned)dev_info.max_rx_pktlen);
846                         return -EINVAL;
847                 } else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
848                         RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
849                                 " < min valid value %u\n",
850                                 port_id,
851                                 (unsigned)dev_conf->rxmode.max_rx_pkt_len,
852                                 (unsigned)ETHER_MIN_LEN);
853                         return -EINVAL;
854                 }
855         } else {
856                 if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
857                         dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
858                         /* Use default value */
859                         dev->data->dev_conf.rxmode.max_rx_pkt_len =
860                                                         ETHER_MAX_LEN;
861         }
862
863         /*
864          * Setup new number of RX/TX queues and reconfigure device.
865          */
866         diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
867         if (diag != 0) {
868                 RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
869                                 port_id, diag);
870                 return diag;
871         }
872
873         diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
874         if (diag != 0) {
875                 RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
876                                 port_id, diag);
877                 rte_eth_dev_rx_queue_config(dev, 0);
878                 return diag;
879         }
880
881         diag = (*dev->dev_ops->dev_configure)(dev);
882         if (diag != 0) {
883                 RTE_PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
884                                 port_id, diag);
885                 rte_eth_dev_rx_queue_config(dev, 0);
886                 rte_eth_dev_tx_queue_config(dev, 0);
887                 return diag;
888         }
889
890         /* Initialize Rx profiling if enabled at compilation time. */
891         diag = __rte_eth_profile_rx_init(port_id, dev);
892         if (diag != 0) {
893                 RTE_PMD_DEBUG_TRACE("port%d __rte_eth_profile_rx_init = %d\n",
894                                 port_id, diag);
895                 rte_eth_dev_rx_queue_config(dev, 0);
896                 rte_eth_dev_tx_queue_config(dev, 0);
897                 return diag;
898         }
899
900         return 0;
901 }
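
/*
 * Typical application-side bring-up sketch around this call (error
 * handling omitted; "port_id" and the mempool "mbuf_pool" are assumed to
 * exist already):
 *
 *	struct rte_eth_conf conf = { 0 };
 *
 *	conf.rxmode.ignore_offload_bitfield = 1;
 *	rte_eth_dev_configure(port_id, 1, 1, &conf);
 *	rte_eth_rx_queue_setup(port_id, 0, 512,
 *			rte_eth_dev_socket_id(port_id), NULL, mbuf_pool);
 *	rte_eth_tx_queue_setup(port_id, 0, 512,
 *			rte_eth_dev_socket_id(port_id), NULL);
 *	rte_eth_dev_start(port_id);
 */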
902
903 void
904 _rte_eth_dev_reset(struct rte_eth_dev *dev)
905 {
906         if (dev->data->dev_started) {
907                 RTE_PMD_DEBUG_TRACE(
908                         "port %d must be stopped to allow reset\n",
909                         dev->data->port_id);
910                 return;
911         }
912
913         rte_eth_dev_rx_queue_config(dev, 0);
914         rte_eth_dev_tx_queue_config(dev, 0);
915
916         memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
917 }
918
919 static void
920 rte_eth_dev_config_restore(uint16_t port_id)
921 {
922         struct rte_eth_dev *dev;
923         struct rte_eth_dev_info dev_info;
924         struct ether_addr *addr;
925         uint16_t i;
926         uint32_t pool = 0;
927         uint64_t pool_mask;
928
929         dev = &rte_eth_devices[port_id];
930
931         rte_eth_dev_info_get(port_id, &dev_info);
932
933         /* replay MAC address configuration including default MAC */
934         addr = &dev->data->mac_addrs[0];
935         if (*dev->dev_ops->mac_addr_set != NULL)
936                 (*dev->dev_ops->mac_addr_set)(dev, addr);
937         else if (*dev->dev_ops->mac_addr_add != NULL)
938                 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);
939
940         if (*dev->dev_ops->mac_addr_add != NULL) {
941                 for (i = 1; i < dev_info.max_mac_addrs; i++) {
942                         addr = &dev->data->mac_addrs[i];
943
944                         /* skip zero address */
945                         if (is_zero_ether_addr(addr))
946                                 continue;
947
948                         pool = 0;
949                         pool_mask = dev->data->mac_pool_sel[i];
950
951                         do {
952                                 if (pool_mask & 1ULL)
953                                         (*dev->dev_ops->mac_addr_add)(dev,
954                                                 addr, i, pool);
955                                 pool_mask >>= 1;
956                                 pool++;
957                         } while (pool_mask);
958                 }
959         }
960
961         /* replay promiscuous configuration */
962         if (rte_eth_promiscuous_get(port_id) == 1)
963                 rte_eth_promiscuous_enable(port_id);
964         else if (rte_eth_promiscuous_get(port_id) == 0)
965                 rte_eth_promiscuous_disable(port_id);
966
967         /* replay all multicast configuration */
968         if (rte_eth_allmulticast_get(port_id) == 1)
969                 rte_eth_allmulticast_enable(port_id);
970         else if (rte_eth_allmulticast_get(port_id) == 0)
971                 rte_eth_allmulticast_disable(port_id);
972 }
973
974 int
975 rte_eth_dev_start(uint16_t port_id)
976 {
977         struct rte_eth_dev *dev;
978         int diag;
979
980         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
981
982         dev = &rte_eth_devices[port_id];
983
984         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
985
986         if (dev->data->dev_started != 0) {
987                 RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16
988                         " already started\n",
989                         port_id);
990                 return 0;
991         }
992
993         diag = (*dev->dev_ops->dev_start)(dev);
994         if (diag == 0)
995                 dev->data->dev_started = 1;
996         else
997                 return diag;
998
999         rte_eth_dev_config_restore(port_id);
1000
1001         if (dev->data->dev_conf.intr_conf.lsc == 0) {
1002                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
1003                 (*dev->dev_ops->link_update)(dev, 0);
1004         }
1005         return 0;
1006 }
1007
1008 void
1009 rte_eth_dev_stop(uint16_t port_id)
1010 {
1011         struct rte_eth_dev *dev;
1012
1013         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1014         dev = &rte_eth_devices[port_id];
1015
1016         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1017
1018         if (dev->data->dev_started == 0) {
1019                 RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16
1020                         " already stopped\n",
1021                         port_id);
1022                 return;
1023         }
1024
1025         dev->data->dev_started = 0;
1026         (*dev->dev_ops->dev_stop)(dev);
1027 }
1028
1029 int
1030 rte_eth_dev_set_link_up(uint16_t port_id)
1031 {
1032         struct rte_eth_dev *dev;
1033
1034         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1035
1036         dev = &rte_eth_devices[port_id];
1037
1038         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1039         return (*dev->dev_ops->dev_set_link_up)(dev);
1040 }
1041
1042 int
1043 rte_eth_dev_set_link_down(uint16_t port_id)
1044 {
1045         struct rte_eth_dev *dev;
1046
1047         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1048
1049         dev = &rte_eth_devices[port_id];
1050
1051         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1052         return (*dev->dev_ops->dev_set_link_down)(dev);
1053 }
1054
1055 void
1056 rte_eth_dev_close(uint16_t port_id)
1057 {
1058         struct rte_eth_dev *dev;
1059
1060         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1061         dev = &rte_eth_devices[port_id];
1062
1063         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
1064         dev->data->dev_started = 0;
1065         (*dev->dev_ops->dev_close)(dev);
1066
1067         dev->data->nb_rx_queues = 0;
1068         rte_free(dev->data->rx_queues);
1069         dev->data->rx_queues = NULL;
1070         dev->data->nb_tx_queues = 0;
1071         rte_free(dev->data->tx_queues);
1072         dev->data->tx_queues = NULL;
1073 }
1074
1075 int
1076 rte_eth_dev_reset(uint16_t port_id)
1077 {
1078         struct rte_eth_dev *dev;
1079         int ret;
1080
1081         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1082         dev = &rte_eth_devices[port_id];
1083
1084         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
1085
1086         rte_eth_dev_stop(port_id);
1087         ret = dev->dev_ops->dev_reset(dev);
1088
1089         return ret;
1090 }
1091
1092 int
1093 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1094                        uint16_t nb_rx_desc, unsigned int socket_id,
1095                        const struct rte_eth_rxconf *rx_conf,
1096                        struct rte_mempool *mp)
1097 {
1098         int ret;
1099         uint32_t mbp_buf_size;
1100         struct rte_eth_dev *dev;
1101         struct rte_eth_dev_info dev_info;
1102         struct rte_eth_rxconf local_conf;
1103         void **rxq;
1104
1105         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1106
1107         dev = &rte_eth_devices[port_id];
1108         if (rx_queue_id >= dev->data->nb_rx_queues) {
1109                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
1110                 return -EINVAL;
1111         }
1112
1113         if (dev->data->dev_started) {
1114                 RTE_PMD_DEBUG_TRACE(
1115                     "port %d must be stopped to allow configuration\n", port_id);
1116                 return -EBUSY;
1117         }
1118
1119         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1120         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
1121
1122         /*
1123          * Check the size of the mbuf data buffer.
1124          * This value must be provided in the private data of the memory pool.
1125          * First check that the memory pool has a valid private data.
1126          */
1127         rte_eth_dev_info_get(port_id, &dev_info);
1128         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
1129                 RTE_PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
1130                                 mp->name, (int) mp->private_data_size,
1131                                 (int) sizeof(struct rte_pktmbuf_pool_private));
1132                 return -ENOSPC;
1133         }
1134         mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1135
1136         if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
1137                 RTE_PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
1138                                 "(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
1139                                 "=%d)\n",
1140                                 mp->name,
1141                                 (int)mbp_buf_size,
1142                                 (int)(RTE_PKTMBUF_HEADROOM +
1143                                       dev_info.min_rx_bufsize),
1144                                 (int)RTE_PKTMBUF_HEADROOM,
1145                                 (int)dev_info.min_rx_bufsize);
1146                 return -EINVAL;
1147         }
1148
1149         if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
1150                         nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
1151                         nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
1152
1153                 RTE_PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
1154                         "should be: <= %hu, >= %hu, and a multiple of %hu\n",
1155                         nb_rx_desc,
1156                         dev_info.rx_desc_lim.nb_max,
1157                         dev_info.rx_desc_lim.nb_min,
1158                         dev_info.rx_desc_lim.nb_align);
1159                 return -EINVAL;
1160         }
1161
1162         rxq = dev->data->rx_queues;
1163         if (rxq[rx_queue_id]) {
1164                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
1165                                         -ENOTSUP);
1166                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
1167                 rxq[rx_queue_id] = NULL;
1168         }
1169
1170         if (rx_conf == NULL)
1171                 rx_conf = &dev_info.default_rxconf;
1172
1173         local_conf = *rx_conf;
1174         if (dev->data->dev_conf.rxmode.ignore_offload_bitfield == 0) {
1175                 /**
1176                  * Reflect port offloads to queue offloads so that
1177                  * port-level offloads are not discarded.
1178                  */
1179                 rte_eth_convert_rx_offload_bitfield(&dev->data->dev_conf.rxmode,
1180                                                     &local_conf.offloads);
1181         }
1182
1183         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
1184                                               socket_id, &local_conf, mp);
1185         if (!ret) {
1186                 if (!dev->data->min_rx_buf_size ||
1187                     dev->data->min_rx_buf_size > mbp_buf_size)
1188                         dev->data->min_rx_buf_size = mbp_buf_size;
1189         }
1190
1191         return ret;
1192 }
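
/*
 * Mempool sizing sketch for the checks above: each mbuf's data room must
 * cover RTE_PKTMBUF_HEADROOM plus the device's min_rx_bufsize.  The pool
 * name and element count below are arbitrary; RTE_MBUF_DEFAULT_BUF_SIZE is
 * large enough for typical devices:
 *
 *	struct rte_mempool *mp;
 *
 *	mp = rte_pktmbuf_pool_create("rx_pool", 8192, 256, 0,
 *			RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *	rte_eth_rx_queue_setup(port_id, 0, 512,
 *			rte_eth_dev_socket_id(port_id), NULL, mp);
 */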
1193
1194 /**
1195  * Convert legacy txq_flags into Tx offload flags.
1196  */
1197 static void
1198 rte_eth_convert_txq_flags(const uint32_t txq_flags, uint64_t *tx_offloads)
1199 {
1200         uint64_t offloads = 0;
1201
1202         if (!(txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS))
1203                 offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
1204         if (!(txq_flags & ETH_TXQ_FLAGS_NOVLANOFFL))
1205                 offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
1206         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP))
1207                 offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM;
1208         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMUDP))
1209                 offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
1210         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMTCP))
1211                 offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
1212         if ((txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT) &&
1213             (txq_flags & ETH_TXQ_FLAGS_NOMULTMEMP))
1214                 offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1215
1216         *tx_offloads = offloads;
1217 }
1218
1219 /**
1220  * Convert Tx offload flags back into legacy txq_flags.
1221  */
1222 static void
1223 rte_eth_convert_txq_offloads(const uint64_t tx_offloads, uint32_t *txq_flags)
1224 {
1225         uint32_t flags = 0;
1226
1227         if (!(tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS))
1228                 flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
1229         if (!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT))
1230                 flags |= ETH_TXQ_FLAGS_NOVLANOFFL;
1231         if (!(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
1232                 flags |= ETH_TXQ_FLAGS_NOXSUMSCTP;
1233         if (!(tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM))
1234                 flags |= ETH_TXQ_FLAGS_NOXSUMUDP;
1235         if (!(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM))
1236                 flags |= ETH_TXQ_FLAGS_NOXSUMTCP;
1237         if (tx_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
1238                 flags |= (ETH_TXQ_FLAGS_NOREFCOUNT | ETH_TXQ_FLAGS_NOMULTMEMP);
1239
1240         *txq_flags = flags;
1241 }
1242
1243 int
1244 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
1245                        uint16_t nb_tx_desc, unsigned int socket_id,
1246                        const struct rte_eth_txconf *tx_conf)
1247 {
1248         struct rte_eth_dev *dev;
1249         struct rte_eth_dev_info dev_info;
1250         struct rte_eth_txconf local_conf;
1251         void **txq;
1252
1253         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1254
1255         dev = &rte_eth_devices[port_id];
1256         if (tx_queue_id >= dev->data->nb_tx_queues) {
1257                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
1258                 return -EINVAL;
1259         }
1260
1261         if (dev->data->dev_started) {
1262                 RTE_PMD_DEBUG_TRACE(
1263                     "port %d must be stopped to allow configuration\n", port_id);
1264                 return -EBUSY;
1265         }
1266
1267         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1268         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
1269
1270         rte_eth_dev_info_get(port_id, &dev_info);
1271
1272         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
1273             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
1274             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
1275                 RTE_PMD_DEBUG_TRACE("Invalid value for nb_tx_desc(=%hu), "
1276                                 "should be: <= %hu, >= %hu, and a multiple of %hu\n",
1277                                 nb_tx_desc,
1278                                 dev_info.tx_desc_lim.nb_max,
1279                                 dev_info.tx_desc_lim.nb_min,
1280                                 dev_info.tx_desc_lim.nb_align);
1281                 return -EINVAL;
1282         }
1283
1284         txq = dev->data->tx_queues;
1285         if (txq[tx_queue_id]) {
1286                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
1287                                         -ENOTSUP);
1288                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
1289                 txq[tx_queue_id] = NULL;
1290         }
1291
1292         if (tx_conf == NULL)
1293                 tx_conf = &dev_info.default_txconf;
1294
1295         /*
1296          * Convert between the offloads API to enable PMDs to support
1297          * only one of them.
1298          */
1299         local_conf = *tx_conf;
1300         if (tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) {
1301                 rte_eth_convert_txq_offloads(tx_conf->offloads,
1302                                              &local_conf.txq_flags);
1303                 /* Keep the ignore flag. */
1304                 local_conf.txq_flags |= ETH_TXQ_FLAGS_IGNORE;
1305         } else {
1306                 rte_eth_convert_txq_flags(tx_conf->txq_flags,
1307                                           &local_conf.offloads);
1308         }
1309
1310         return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
1311                                                socket_id, &local_conf);
1312 }
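
/*
 * Usage sketch of the txq_flags/offloads duality handled above: an
 * application that already uses the per-queue offloads API sets
 * ETH_TXQ_FLAGS_IGNORE to signal that only the offloads field is
 * meaningful ("port_id" assumed valid, offload choice arbitrary):
 *
 *	struct rte_eth_dev_info dev_info;
 *	struct rte_eth_txconf txconf;
 *
 *	rte_eth_dev_info_get(port_id, &dev_info);
 *	txconf = dev_info.default_txconf;
 *	txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
 *	txconf.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
 *	rte_eth_tx_queue_setup(port_id, 0, 512,
 *			rte_eth_dev_socket_id(port_id), &txconf);
 */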
1313
1314 void
1315 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
1316                 void *userdata __rte_unused)
1317 {
1318         unsigned i;
1319
1320         for (i = 0; i < unsent; i++)
1321                 rte_pktmbuf_free(pkts[i]);
1322 }
1323
1324 void
1325 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
1326                 void *userdata)
1327 {
1328         uint64_t *count = userdata;
1329         unsigned i;
1330
1331         for (i = 0; i < unsent; i++)
1332                 rte_pktmbuf_free(pkts[i]);
1333
1334         *count += unsent;
1335 }
1336
1337 int
1338 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
1339                 buffer_tx_error_fn cbfn, void *userdata)
1340 {
1341         buffer->error_callback = cbfn;
1342         buffer->error_userdata = userdata;
1343         return 0;
1344 }
1345
1346 int
1347 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
1348 {
1349         int ret = 0;
1350
1351         if (buffer == NULL)
1352                 return -EINVAL;
1353
1354         buffer->size = size;
1355         if (buffer->error_callback == NULL) {
1356                 ret = rte_eth_tx_buffer_set_err_callback(
1357                         buffer, rte_eth_tx_buffer_drop_callback, NULL);
1358         }
1359
1360         return ret;
1361 }
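
/*
 * Buffered Tx usage sketch (the drop callback installed above is the
 * default; "BURST" is a hypothetical application constant):
 *
 *	struct rte_eth_dev_tx_buffer *buffer;
 *
 *	buffer = rte_zmalloc_socket("tx_buffer",
 *			RTE_ETH_TX_BUFFER_SIZE(BURST), 0,
 *			rte_eth_dev_socket_id(port_id));
 *	rte_eth_tx_buffer_init(buffer, BURST);
 *	...
 *	rte_eth_tx_buffer(port_id, 0, buffer, mbuf);
 *	rte_eth_tx_buffer_flush(port_id, 0, buffer);
 */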
1362
1363 int
1364 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
1365 {
1366         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1367
1368         /* Validate Input Data. Bail if not valid or not supported. */
1369         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1370         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
1371
1372         /* Call driver to free pending mbufs. */
1373         return (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
1374                         free_cnt);
1375 }
1376
1377 void
1378 rte_eth_promiscuous_enable(uint16_t port_id)
1379 {
1380         struct rte_eth_dev *dev;
1381
1382         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1383         dev = &rte_eth_devices[port_id];
1384
1385         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
1386         (*dev->dev_ops->promiscuous_enable)(dev);
1387         dev->data->promiscuous = 1;
1388 }
1389
1390 void
1391 rte_eth_promiscuous_disable(uint16_t port_id)
1392 {
1393         struct rte_eth_dev *dev;
1394
1395         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1396         dev = &rte_eth_devices[port_id];
1397
1398         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
1399         dev->data->promiscuous = 0;
1400         (*dev->dev_ops->promiscuous_disable)(dev);
1401 }
1402
1403 int
1404 rte_eth_promiscuous_get(uint16_t port_id)
1405 {
1406         struct rte_eth_dev *dev;
1407
1408         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1409
1410         dev = &rte_eth_devices[port_id];
1411         return dev->data->promiscuous;
1412 }
1413
1414 void
1415 rte_eth_allmulticast_enable(uint16_t port_id)
1416 {
1417         struct rte_eth_dev *dev;
1418
1419         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1420         dev = &rte_eth_devices[port_id];
1421
1422         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
1423         (*dev->dev_ops->allmulticast_enable)(dev);
1424         dev->data->all_multicast = 1;
1425 }
1426
1427 void
1428 rte_eth_allmulticast_disable(uint16_t port_id)
1429 {
1430         struct rte_eth_dev *dev;
1431
1432         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1433         dev = &rte_eth_devices[port_id];
1434
1435         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
1436         dev->data->all_multicast = 0;
1437         (*dev->dev_ops->allmulticast_disable)(dev);
1438 }
1439
1440 int
1441 rte_eth_allmulticast_get(uint16_t port_id)
1442 {
1443         struct rte_eth_dev *dev;
1444
1445         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1446
1447         dev = &rte_eth_devices[port_id];
1448         return dev->data->all_multicast;
1449 }
1450
1451 static inline int
1452 rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
1453                                 struct rte_eth_link *link)
1454 {
1455         struct rte_eth_link *dst = link;
1456         struct rte_eth_link *src = &(dev->data->dev_link);
1457
1458         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
1459                                         *(uint64_t *)src) == 0)
1460                 return -1;
1461
1462         return 0;
1463 }
1464
1465 void
1466 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
1467 {
1468         struct rte_eth_dev *dev;
1469
1470         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1471         dev = &rte_eth_devices[port_id];
1472
1473         if (dev->data->dev_conf.intr_conf.lsc != 0)
1474                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1475         else {
1476                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1477                 (*dev->dev_ops->link_update)(dev, 1);
1478                 *eth_link = dev->data->dev_link;
1479         }
1480 }
1481
1482 void
1483 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
1484 {
1485         struct rte_eth_dev *dev;
1486
1487         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1488         dev = &rte_eth_devices[port_id];
1489
1490         if (dev->data->dev_conf.intr_conf.lsc != 0)
1491                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1492         else {
1493                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1494                 (*dev->dev_ops->link_update)(dev, 0);
1495                 *eth_link = dev->data->dev_link;
1496         }
1497 }
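
/*
 * Polling sketch: the _nowait variant returns the last known status
 * without blocking, so it is the one typically called from a fast loop
 * or a status thread ("port_id" assumed valid):
 *
 *	struct rte_eth_link link;
 *
 *	rte_eth_link_get_nowait(port_id, &link);
 *	if (link.link_status)
 *		printf("port %u up at %u Mbps\n", port_id, link.link_speed);
 */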
1498
1499 int
1500 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
1501 {
1502         struct rte_eth_dev *dev;
1503
1504         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1505
1506         dev = &rte_eth_devices[port_id];
1507         memset(stats, 0, sizeof(*stats));
1508
1509         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1510         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1511         return (*dev->dev_ops->stats_get)(dev, stats);
1512 }
1513
1514 int
1515 rte_eth_stats_reset(uint16_t port_id)
1516 {
1517         struct rte_eth_dev *dev;
1518
1519         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1520         dev = &rte_eth_devices[port_id];
1521
1522         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
1523         (*dev->dev_ops->stats_reset)(dev);
1524         dev->data->rx_mbuf_alloc_failed = 0;
1525
1526         return 0;
1527 }
1528
1529 static inline int
1530 get_xstats_basic_count(struct rte_eth_dev *dev)
1531 {
1532         uint16_t nb_rxqs, nb_txqs;
1533         int count;
1534
1535         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1536         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1537
1538         count = RTE_NB_STATS;
1539         count += nb_rxqs * RTE_NB_RXQ_STATS;
1540         count += nb_txqs * RTE_NB_TXQ_STATS;
1541
1542         return count;
1543 }
1544
1545 static int
1546 get_xstats_count(uint16_t port_id)
1547 {
1548         struct rte_eth_dev *dev;
1549         int count;
1550
1551         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1552         dev = &rte_eth_devices[port_id];
1553         if (dev->dev_ops->xstats_get_names_by_id != NULL) {
1554                 count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
1555                                 NULL, 0);
1556                 if (count < 0)
1557                         return count;
1558         }
1559         if (dev->dev_ops->xstats_get_names != NULL) {
1560                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
1561                 if (count < 0)
1562                         return count;
1563         } else
1564                 count = 0;
1565
1566
1567         count += get_xstats_basic_count(dev);
1568
1569         return count;
1570 }
1571
1572 int
1573 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
1574                 uint64_t *id)
1575 {
1576         int cnt_xstats, idx_xstat;
1577
1578         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1579
1580         if (!id) {
1581                 RTE_PMD_DEBUG_TRACE("Error: id pointer is NULL\n");
1582                 return -ENOMEM;
1583         }
1584
1585         if (!xstat_name) {
1586                 RTE_PMD_DEBUG_TRACE("Error: xstat_name pointer is NULL\n");
1587                 return -ENOMEM;
1588         }
1589
1590         /* Get count */
1591         cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
1592         if (cnt_xstats < 0) {
1593                 RTE_PMD_DEBUG_TRACE("Error: Cannot get count of xstats\n");
1594                 return -ENODEV;
1595         }
1596
1597         /* Get id-name lookup table */
1598         struct rte_eth_xstat_name xstats_names[cnt_xstats];
1599
1600         if (cnt_xstats != rte_eth_xstats_get_names_by_id(
1601                         port_id, xstats_names, cnt_xstats, NULL)) {
1602                 RTE_PMD_DEBUG_TRACE("Error: Cannot get xstats lookup\n");
1603                 return -1;
1604         }
1605
1606         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
1607                 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
1608                         *id = idx_xstat;
1609                         return 0;
1610                 }
1611         }
1612
1613         return -EINVAL;
1614 }
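
/*
 * Usage sketch (illustrative only; port_id is a hypothetical valid port):
 * resolve one of the generic names from rte_stats_strings[] and fetch just
 * that value through the by-id API.
 *
 *	uint64_t id, value;
 *
 *	if (rte_eth_xstats_get_id_by_name(port_id, "rx_missed_errors",
 *					  &id) == 0 &&
 *	    rte_eth_xstats_get_by_id(port_id, &id, &value, 1) == 1)
 *		printf("rx_missed_errors = %" PRIu64 "\n", value);
 */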
1615
1616 /* retrieve ethdev extended statistics names */
1617 int
1618 rte_eth_xstats_get_names_by_id(uint16_t port_id,
1619         struct rte_eth_xstat_name *xstats_names, unsigned int size,
1620         uint64_t *ids)
1621 {
1622         struct rte_eth_xstat_name *xstats_names_copy;
1623         unsigned int no_basic_stat_requested = 1;
1624         unsigned int expected_entries;
1625         struct rte_eth_dev *dev;
1626         unsigned int i;
1627         int ret;
1628
1629         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1630         dev = &rte_eth_devices[port_id];
1631
1632         ret = get_xstats_count(port_id);
1633         if (ret < 0)
1634                 return ret;
1635         expected_entries = (unsigned int)ret;
1636
1637         /* Return max number of stats if no ids given */
1638         if (!ids) {
1639                 if (!xstats_names)
1640                         return expected_entries;
1641                 else if (xstats_names && size < expected_entries)
1642                         return expected_entries;
1643         }
1644
1645         if (ids && !xstats_names)
1646                 return -EINVAL;
1647
1648         if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
1649                 unsigned int basic_count = get_xstats_basic_count(dev);
1650                 uint64_t ids_copy[size];
1651
1652                 for (i = 0; i < size; i++) {
1653                         if (ids[i] < basic_count) {
1654                                 no_basic_stat_requested = 0;
1655                                 break;
1656                         }
1657
1658                         /*
1659                          * Convert ids to the xstats ids the PMD knows;
1660                          * the ids seen by the user are basic + extended stats.
1661                          */
1662                         ids_copy[i] = ids[i] - basic_count;
1663                 }
1664
1665                 if (no_basic_stat_requested)
1666                         return (*dev->dev_ops->xstats_get_names_by_id)(dev,
1667                                         xstats_names, ids_copy, size);
1668         }
1669
1670         /* Retrieve all stats */
1671         if (!ids) {
1672                 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
1673                                 expected_entries);
1674                 if (num_stats < 0 || num_stats > (int)expected_entries)
1675                         return num_stats;
1676                 else
1677                         return expected_entries;
1678         }
1679
1680         xstats_names_copy = calloc(expected_entries,
1681                 sizeof(struct rte_eth_xstat_name));
1682
1683         if (!xstats_names_copy) {
1684                 RTE_PMD_DEBUG_TRACE("ERROR: can't allocate memory\n");
1685                 return -ENOMEM;
1686         }
1687
1688         /* Fill xstats_names_copy structure */
1689         rte_eth_xstats_get_names(port_id, xstats_names_copy, expected_entries);
1690
1691         /* Filter stats */
1692         for (i = 0; i < size; i++) {
1693                 if (ids[i] >= expected_entries) {
1694                         RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n");
1695                         free(xstats_names_copy);
1696                         return -1;
1697                 }
1698                 xstats_names[i] = xstats_names_copy[ids[i]];
1699         }
1700
1701         free(xstats_names_copy);
1702         return size;
1703 }
1704
1705 int
1706 rte_eth_xstats_get_names(uint16_t port_id,
1707         struct rte_eth_xstat_name *xstats_names,
1708         unsigned int size)
1709 {
1710         struct rte_eth_dev *dev;
1711         int cnt_used_entries;
1712         int cnt_expected_entries;
1713         int cnt_driver_entries;
1714         uint32_t idx, id_queue;
1715         uint16_t num_q;
1716
1717         cnt_expected_entries = get_xstats_count(port_id);
1718         if (xstats_names == NULL || cnt_expected_entries < 0 ||
1719                         (int)size < cnt_expected_entries)
1720                 return cnt_expected_entries;
1721
1722         /* port_id checked in get_xstats_count() */
1723         dev = &rte_eth_devices[port_id];
1724         cnt_used_entries = 0;
1725
1726         for (idx = 0; idx < RTE_NB_STATS; idx++) {
1727                 snprintf(xstats_names[cnt_used_entries].name,
1728                         sizeof(xstats_names[0].name),
1729                         "%s", rte_stats_strings[idx].name);
1730                 cnt_used_entries++;
1731         }
1732         num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1733         for (id_queue = 0; id_queue < num_q; id_queue++) {
1734                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
1735                         snprintf(xstats_names[cnt_used_entries].name,
1736                                 sizeof(xstats_names[0].name),
1737                                 "rx_q%u%s",
1738                                 id_queue, rte_rxq_stats_strings[idx].name);
1739                         cnt_used_entries++;
1740                 }
1741
1742         }
1743         num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1744         for (id_queue = 0; id_queue < num_q; id_queue++) {
1745                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
1746                         snprintf(xstats_names[cnt_used_entries].name,
1747                                 sizeof(xstats_names[0].name),
1748                                 "tx_q%u%s",
1749                                 id_queue, rte_txq_stats_strings[idx].name);
1750                         cnt_used_entries++;
1751                 }
1752         }
1753
1754         if (dev->dev_ops->xstats_get_names != NULL) {
1755                 /* If there are any driver-specific xstats, append them
1756                  * to the end of the list.
1757                  */
1758                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
1759                         dev,
1760                         xstats_names + cnt_used_entries,
1761                         size - cnt_used_entries);
1762                 if (cnt_driver_entries < 0)
1763                         return cnt_driver_entries;
1764                 cnt_used_entries += cnt_driver_entries;
1765         }
1766
1767         return cnt_used_entries;
1768 }
1769
1770 /* retrieve ethdev extended statistics */
1771 int
1772 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
1773                          uint64_t *values, unsigned int size)
1774 {
1775         unsigned int no_basic_stat_requested = 1;
1776         unsigned int num_xstats_filled;
1777         uint16_t expected_entries;
1778         struct rte_eth_dev *dev;
1779         unsigned int i;
1780         int ret;
1781
1782         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1783         expected_entries = get_xstats_count(port_id);
1784         struct rte_eth_xstat xstats[expected_entries];
1785         dev = &rte_eth_devices[port_id];
1786
1787         /* Return max number of stats if no ids given */
1788         if (!ids) {
1789                 if (!values)
1790                         return expected_entries;
1791                 else if (values && size < expected_entries)
1792                         return expected_entries;
1793         }
1794
1795         if (ids && !values)
1796                 return -EINVAL;
1797
1798         if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
1799                 unsigned int basic_count = get_xstats_basic_count(dev);
1800                 uint64_t ids_copy[size];
1801
1802                 for (i = 0; i < size; i++) {
1803                         if (ids[i] < basic_count) {
1804                                 no_basic_stat_requested = 0;
1805                                 break;
1806                         }
1807
1808                         /*
1809                          * Convert ids to the xstats ids the PMD knows;
1810                          * the ids seen by the user are basic + extended stats.
1811                          */
1812                         ids_copy[i] = ids[i] - basic_count;
1813                 }
1814
1815                 if (no_basic_stat_requested)
1816                         return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
1817                                         values, size);
1818         }
1819
1820         /* Fill the xstats structure */
1821         ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
1822         if (ret < 0)
1823                 return ret;
1824         num_xstats_filled = (unsigned int)ret;
1825
1826         /* Return all stats */
1827         if (!ids) {
1828                 for (i = 0; i < num_xstats_filled; i++)
1829                         values[i] = xstats[i].value;
1830                 return expected_entries;
1831         }
1832
1833         /* Filter stats */
1834         for (i = 0; i < size; i++) {
1835                 if (ids[i] >= expected_entries) {
1836                         RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n");
1837                         return -1;
1838                 }
1839                 values[i] = xstats[ids[i]].value;
1840         }
1841         return size;
1842 }
1843
1844 int
1845 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
1846         unsigned int n)
1847 {
1848         struct rte_eth_stats eth_stats;
1849         struct rte_eth_dev *dev;
1850         unsigned int count = 0, i, q;
1851         signed int xcount = 0;
1852         uint64_t val, *stats_ptr;
1853         uint16_t nb_rxqs, nb_txqs;
1854
1855         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1856
1857         dev = &rte_eth_devices[port_id];
1858
1859         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1860         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1861
1862         /* Return generic statistics */
1863         count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
1864                 (nb_txqs * RTE_NB_TXQ_STATS);
1865
1866         /* implemented by the driver */
1867         if (dev->dev_ops->xstats_get != NULL) {
1868                 /* Retrieve the driver-specific xstats and place them at
1869                  * the end of the xstats array.
1870                  */
1871                 xcount = (*dev->dev_ops->xstats_get)(dev,
1872                                      xstats ? xstats + count : NULL,
1873                                      (n > count) ? n - count : 0);
1874
1875                 if (xcount < 0)
1876                         return xcount;
1877         }
1878
1879         if (n < count + xcount || xstats == NULL)
1880                 return count + xcount;
1881
1882         /* now fill the xstats structure */
1883         count = 0;
1884         rte_eth_stats_get(port_id, &eth_stats);
1885
1886         /* global stats */
1887         for (i = 0; i < RTE_NB_STATS; i++) {
1888                 stats_ptr = RTE_PTR_ADD(&eth_stats,
1889                                         rte_stats_strings[i].offset);
1890                 val = *stats_ptr;
1891                 xstats[count++].value = val;
1892         }
1893
1894         /* per-rxq stats */
1895         for (q = 0; q < nb_rxqs; q++) {
1896                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
1897                         stats_ptr = RTE_PTR_ADD(&eth_stats,
1898                                         rte_rxq_stats_strings[i].offset +
1899                                         q * sizeof(uint64_t));
1900                         val = *stats_ptr;
1901                         xstats[count++].value = val;
1902                 }
1903         }
1904
1905         /* per-txq stats */
1906         for (q = 0; q < nb_txqs; q++) {
1907                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
1908                         stats_ptr = RTE_PTR_ADD(&eth_stats,
1909                                         rte_txq_stats_strings[i].offset +
1910                                         q * sizeof(uint64_t));
1911                         val = *stats_ptr;
1912                         xstats[count++].value = val;
1913                 }
1914         }
1915
1916         for (i = 0; i < count; i++)
1917                 xstats[i].id = i;
1918         /* add an offset to driver-specific stats */
1919         for ( ; i < count + xcount; i++)
1920                 xstats[i].id += count;
1921
1922         return count + xcount;
1923 }
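
/*
 * Usage sketch (illustrative only; port_id is a hypothetical valid port):
 * the usual two-pass pattern - query the required size first (generic
 * stats plus any driver-specific ones), allocate, then fetch.
 *
 *	int n = rte_eth_xstats_get(port_id, NULL, 0);
 *	struct rte_eth_xstat *xs;
 *
 *	if (n > 0 && (xs = malloc(n * sizeof(*xs))) != NULL) {
 *		n = rte_eth_xstats_get(port_id, xs, n);
 *		.. iterate xs[0..n-1]; ids match rte_eth_xstats_get_names() ..
 *		free(xs);
 *	}
 */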
1924
1925 /* reset ethdev extended statistics */
1926 void
1927 rte_eth_xstats_reset(uint16_t port_id)
1928 {
1929         struct rte_eth_dev *dev;
1930
1931         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1932         dev = &rte_eth_devices[port_id];
1933
1934         /* implemented by the driver */
1935         if (dev->dev_ops->xstats_reset != NULL) {
1936                 (*dev->dev_ops->xstats_reset)(dev);
1937                 return;
1938         }
1939
1940         /* fallback to default */
1941         rte_eth_stats_reset(port_id);
1942 }
1943
1944 static int
1945 set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx,
1946                 uint8_t is_rx)
1947 {
1948         struct rte_eth_dev *dev;
1949
1950         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1951
1952         dev = &rte_eth_devices[port_id];
1953
1954         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
1955         return (*dev->dev_ops->queue_stats_mapping_set)
1956                         (dev, queue_id, stat_idx, is_rx);
1957 }
1958
1959
1960 int
1961 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
1962                 uint8_t stat_idx)
1963 {
1964         return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
1965                         STAT_QMAP_TX);
1966 }
1967
1968
1969 int
1970 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
1971                 uint8_t stat_idx)
1972 {
1973         return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
1974                         STAT_QMAP_RX);
1975 }
1976
1977 int
1978 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
1979 {
1980         struct rte_eth_dev *dev;
1981
1982         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1983         dev = &rte_eth_devices[port_id];
1984
1985         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
1986         return (*dev->dev_ops->fw_version_get)(dev, fw_version, fw_size);
1987 }
1988
1989 void
1990 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
1991 {
1992         struct rte_eth_dev *dev;
1993         const struct rte_eth_desc_lim lim = {
1994                 .nb_max = UINT16_MAX,
1995                 .nb_min = 0,
1996                 .nb_align = 1,
1997         };
1998
1999         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2000         dev = &rte_eth_devices[port_id];
2001
2002         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
2003         dev_info->rx_desc_lim = lim;
2004         dev_info->tx_desc_lim = lim;
2005
2006         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
2007         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
2008         dev_info->driver_name = dev->device->driver->name;
2009         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
2010         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
2011 }
2012
2013 int
2014 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
2015                                  uint32_t *ptypes, int num)
2016 {
2017         int i, j;
2018         struct rte_eth_dev *dev;
2019         const uint32_t *all_ptypes;
2020
2021         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2022         dev = &rte_eth_devices[port_id];
2023         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
2024         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
2025
2026         if (!all_ptypes)
2027                 return 0;
2028
2029         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
2030                 if (all_ptypes[i] & ptype_mask) {
2031                         if (j < num)
2032                                 ptypes[j] = all_ptypes[i];
2033                         j++;
2034                 }
2035
2036         return j;
2037 }
2038
2039 void
2040 rte_eth_macaddr_get(uint16_t port_id, struct ether_addr *mac_addr)
2041 {
2042         struct rte_eth_dev *dev;
2043
2044         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2045         dev = &rte_eth_devices[port_id];
2046         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
2047 }
2048
2049
2050 int
2051 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
2052 {
2053         struct rte_eth_dev *dev;
2054
2055         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2056
2057         dev = &rte_eth_devices[port_id];
2058         *mtu = dev->data->mtu;
2059         return 0;
2060 }
2061
2062 int
2063 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
2064 {
2065         int ret;
2066         struct rte_eth_dev *dev;
2067
2068         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2069         dev = &rte_eth_devices[port_id];
2070         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
2071
2072         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
2073         if (!ret)
2074                 dev->data->mtu = mtu;
2075
2076         return ret;
2077 }
2078
2079 int
2080 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
2081 {
2082         struct rte_eth_dev *dev;
2083         int ret;
2084
2085         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2086         dev = &rte_eth_devices[port_id];
2087         if (!(dev->data->dev_conf.rxmode.offloads &
2088               DEV_RX_OFFLOAD_VLAN_FILTER)) {
2089                 RTE_PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
2090                 return -ENOSYS;
2091         }
2092
2093         if (vlan_id > 4095) {
2094                 RTE_PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
2095                                 port_id, (unsigned) vlan_id);
2096                 return -EINVAL;
2097         }
2098         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
2099
2100         ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
2101         if (ret == 0) {
2102                 struct rte_vlan_filter_conf *vfc;
2103                 int vidx;
2104                 int vbit;
2105
2106                 vfc = &dev->data->vlan_filter_conf;
2107                 vidx = vlan_id / 64;
2108                 vbit = vlan_id % 64;
2109
2110                 if (on)
2111                         vfc->ids[vidx] |= UINT64_C(1) << vbit;
2112                 else
2113                         vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
2114         }
2115
2116         return ret;
2117 }
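
/*
 * Usage sketch (illustrative only; port_id is a hypothetical port and
 * VLAN id 100 is arbitrary): VLAN filtering must have been enabled at
 * configure time through DEV_RX_OFFLOAD_VLAN_FILTER in rxmode.offloads,
 * otherwise the call above returns -ENOSYS.
 *
 *	if (rte_eth_dev_vlan_filter(port_id, 100, 1) != 0)
 *		printf("cannot accept VLAN 100 on port %u\n", port_id);
 */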
2118
2119 int
2120 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
2121                                     int on)
2122 {
2123         struct rte_eth_dev *dev;
2124
2125         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2126         dev = &rte_eth_devices[port_id];
2127         if (rx_queue_id >= dev->data->nb_rx_queues) {
2128                 RTE_PMD_DEBUG_TRACE("Invalid rx_queue_id=%u\n", rx_queue_id);
2129                 return -EINVAL;
2130         }
2131
2132         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
2133         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
2134
2135         return 0;
2136 }
2137
2138 int
2139 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
2140                                 enum rte_vlan_type vlan_type,
2141                                 uint16_t tpid)
2142 {
2143         struct rte_eth_dev *dev;
2144
2145         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2146         dev = &rte_eth_devices[port_id];
2147         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
2148
2149         return (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, tpid);
2150 }
2151
2152 int
2153 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
2154 {
2155         struct rte_eth_dev *dev;
2156         int ret = 0;
2157         int mask = 0;
2158         int cur, org = 0;
2159         uint64_t orig_offloads;
2160
2161         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2162         dev = &rte_eth_devices[port_id];
2163
2164         /* save original values in case of failure */
2165         orig_offloads = dev->data->dev_conf.rxmode.offloads;
2166
2167         /* check which options were changed by the application */
2168         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
2169         org = !!(dev->data->dev_conf.rxmode.offloads &
2170                  DEV_RX_OFFLOAD_VLAN_STRIP);
2171         if (cur != org) {
2172                 if (cur)
2173                         dev->data->dev_conf.rxmode.offloads |=
2174                                 DEV_RX_OFFLOAD_VLAN_STRIP;
2175                 else
2176                         dev->data->dev_conf.rxmode.offloads &=
2177                                 ~DEV_RX_OFFLOAD_VLAN_STRIP;
2178                 mask |= ETH_VLAN_STRIP_MASK;
2179         }
2180
2181         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
2182         org = !!(dev->data->dev_conf.rxmode.offloads &
2183                  DEV_RX_OFFLOAD_VLAN_FILTER);
2184         if (cur != org) {
2185                 if (cur)
2186                         dev->data->dev_conf.rxmode.offloads |=
2187                                 DEV_RX_OFFLOAD_VLAN_FILTER;
2188                 else
2189                         dev->data->dev_conf.rxmode.offloads &=
2190                                 ~DEV_RX_OFFLOAD_VLAN_FILTER;
2191                 mask |= ETH_VLAN_FILTER_MASK;
2192         }
2193
2194         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
2195         org = !!(dev->data->dev_conf.rxmode.offloads &
2196                  DEV_RX_OFFLOAD_VLAN_EXTEND);
2197         if (cur != org) {
2198                 if (cur)
2199                         dev->data->dev_conf.rxmode.offloads |=
2200                                 DEV_RX_OFFLOAD_VLAN_EXTEND;
2201                 else
2202                         dev->data->dev_conf.rxmode.offloads &=
2203                                 ~DEV_RX_OFFLOAD_VLAN_EXTEND;
2204                 mask |= ETH_VLAN_EXTEND_MASK;
2205         }
2206
2207         /* no change */
2208         if (mask == 0)
2209                 return ret;
2210
2211         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
2212
2213         /*
2214          * Convert to the offload bitfield API just in case the underlying PMD
2215          * still supports it.
2216          */
2217         rte_eth_convert_rx_offloads(dev->data->dev_conf.rxmode.offloads,
2218                                     &dev->data->dev_conf.rxmode);
2219         ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
2220         if (ret) {
2221                 /* hit an error, restore the original values */
2222                 dev->data->dev_conf.rxmode.offloads = orig_offloads;
2223                 rte_eth_convert_rx_offloads(dev->data->dev_conf.rxmode.offloads,
2224                                             &dev->data->dev_conf.rxmode);
2225         }
2226
2227         return ret;
2228 }
2229
2230 int
2231 rte_eth_dev_get_vlan_offload(uint16_t port_id)
2232 {
2233         struct rte_eth_dev *dev;
2234         int ret = 0;
2235
2236         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2237         dev = &rte_eth_devices[port_id];
2238
2239         if (dev->data->dev_conf.rxmode.offloads &
2240             DEV_RX_OFFLOAD_VLAN_STRIP)
2241                 ret |= ETH_VLAN_STRIP_OFFLOAD;
2242
2243         if (dev->data->dev_conf.rxmode.offloads &
2244             DEV_RX_OFFLOAD_VLAN_FILTER)
2245                 ret |= ETH_VLAN_FILTER_OFFLOAD;
2246
2247         if (dev->data->dev_conf.rxmode.offloads &
2248             DEV_RX_OFFLOAD_VLAN_EXTEND)
2249                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
2250
2251         return ret;
2252 }
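
/*
 * Usage sketch (illustrative only; port_id is a hypothetical valid port):
 * the offload mask passed to rte_eth_dev_set_vlan_offload() is absolute,
 * so toggling a single flag is a read-modify-write.
 *
 *	int mask = rte_eth_dev_get_vlan_offload(port_id);
 *
 *	if (mask >= 0) {
 *		mask |= ETH_VLAN_STRIP_OFFLOAD;
 *		rte_eth_dev_set_vlan_offload(port_id, mask);
 *	}
 */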
2253
2254 int
2255 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
2256 {
2257         struct rte_eth_dev *dev;
2258
2259         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2260         dev = &rte_eth_devices[port_id];
2261         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
2262         (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on);
2263
2264         return 0;
2265 }
2266
2267 int
2268 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
2269 {
2270         struct rte_eth_dev *dev;
2271
2272         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2273         dev = &rte_eth_devices[port_id];
2274         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
2275         memset(fc_conf, 0, sizeof(*fc_conf));
2276         return (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf);
2277 }
2278
2279 int
2280 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
2281 {
2282         struct rte_eth_dev *dev;
2283
2284         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2285         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
2286                 RTE_PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
2287                 return -EINVAL;
2288         }
2289
2290         dev = &rte_eth_devices[port_id];
2291         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
2292         return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
2293 }
2294
2295 int
2296 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
2297                                    struct rte_eth_pfc_conf *pfc_conf)
2298 {
2299         struct rte_eth_dev *dev;
2300
2301         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2302         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
2303                 RTE_PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
2304                 return -EINVAL;
2305         }
2306
2307         dev = &rte_eth_devices[port_id];
2308         /* High water / low water validation is device-specific */
2309         if (*dev->dev_ops->priority_flow_ctrl_set)
2310                 return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
2311         return -ENOTSUP;
2312 }
2313
2314 static int
2315 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
2316                         uint16_t reta_size)
2317 {
2318         uint16_t i, num;
2319
2320         if (!reta_conf)
2321                 return -EINVAL;
2322
2323         num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
2324         for (i = 0; i < num; i++) {
2325                 if (reta_conf[i].mask)
2326                         return 0;
2327         }
2328
2329         return -EINVAL;
2330 }
2331
2332 static int
2333 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
2334                          uint16_t reta_size,
2335                          uint16_t max_rxq)
2336 {
2337         uint16_t i, idx, shift;
2338
2339         if (!reta_conf)
2340                 return -EINVAL;
2341
2342         if (max_rxq == 0) {
2343                 RTE_PMD_DEBUG_TRACE("No receive queue is available\n");
2344                 return -EINVAL;
2345         }
2346
2347         for (i = 0; i < reta_size; i++) {
2348                 idx = i / RTE_RETA_GROUP_SIZE;
2349                 shift = i % RTE_RETA_GROUP_SIZE;
2350                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
2351                         (reta_conf[idx].reta[shift] >= max_rxq)) {
2352                         RTE_PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
2353                                 "the maximum rxq index: %u\n", idx, shift,
2354                                 reta_conf[idx].reta[shift], max_rxq);
2355                         return -EINVAL;
2356                 }
2357         }
2358
2359         return 0;
2360 }
2361
2362 int
2363 rte_eth_dev_rss_reta_update(uint16_t port_id,
2364                             struct rte_eth_rss_reta_entry64 *reta_conf,
2365                             uint16_t reta_size)
2366 {
2367         struct rte_eth_dev *dev;
2368         int ret;
2369
2370         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2371         /* Check mask bits */
2372         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2373         if (ret < 0)
2374                 return ret;
2375
2376         dev = &rte_eth_devices[port_id];
2377
2378         /* Check entry value */
2379         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
2380                                 dev->data->nb_rx_queues);
2381         if (ret < 0)
2382                 return ret;
2383
2384         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
2385         return (*dev->dev_ops->reta_update)(dev, reta_conf, reta_size);
2386 }
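
/*
 * Usage sketch (illustrative only; port_id, reta_size and nb_rx_queues are
 * hypothetical values, with reta_size taken from dev_info.reta_size and
 * assumed to be a multiple of RTE_RETA_GROUP_SIZE): entry i lives in group
 * i / RTE_RETA_GROUP_SIZE at bit i % RTE_RETA_GROUP_SIZE, and only entries
 * whose mask bit is set are written.
 *
 *	struct rte_eth_rss_reta_entry64 reta[reta_size / RTE_RETA_GROUP_SIZE];
 *	uint16_t i;
 *
 *	memset(reta, 0, sizeof(reta));
 *	for (i = 0; i < reta_size; i++) {
 *		uint16_t idx = i / RTE_RETA_GROUP_SIZE;
 *		uint16_t shift = i % RTE_RETA_GROUP_SIZE;
 *
 *		reta[idx].mask |= 1ULL << shift;
 *		reta[idx].reta[shift] = i % nb_rx_queues;
 *	}
 *	rte_eth_dev_rss_reta_update(port_id, reta, reta_size);
 */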
2387
2388 int
2389 rte_eth_dev_rss_reta_query(uint16_t port_id,
2390                            struct rte_eth_rss_reta_entry64 *reta_conf,
2391                            uint16_t reta_size)
2392 {
2393         struct rte_eth_dev *dev;
2394         int ret;
2395
2396         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2397
2398         /* Check mask bits */
2399         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2400         if (ret < 0)
2401                 return ret;
2402
2403         dev = &rte_eth_devices[port_id];
2404         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
2405         return (*dev->dev_ops->reta_query)(dev, reta_conf, reta_size);
2406 }
2407
2408 int
2409 rte_eth_dev_rss_hash_update(uint16_t port_id,
2410                             struct rte_eth_rss_conf *rss_conf)
2411 {
2412         struct rte_eth_dev *dev;
2413
2414         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2415         dev = &rte_eth_devices[port_id];
2416         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
2417         return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
2418 }
2419
2420 int
2421 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
2422                               struct rte_eth_rss_conf *rss_conf)
2423 {
2424         struct rte_eth_dev *dev;
2425
2426         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2427         dev = &rte_eth_devices[port_id];
2428         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
2429         return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
2430 }
2431
2432 int
2433 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
2434                                 struct rte_eth_udp_tunnel *udp_tunnel)
2435 {
2436         struct rte_eth_dev *dev;
2437
2438         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2439         if (udp_tunnel == NULL) {
2440                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2441                 return -EINVAL;
2442         }
2443
2444         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2445                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
2446                 return -EINVAL;
2447         }
2448
2449         dev = &rte_eth_devices[port_id];
2450         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
2451         return (*dev->dev_ops->udp_tunnel_port_add)(dev, udp_tunnel);
2452 }
2453
2454 int
2455 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
2456                                    struct rte_eth_udp_tunnel *udp_tunnel)
2457 {
2458         struct rte_eth_dev *dev;
2459
2460         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2461         dev = &rte_eth_devices[port_id];
2462
2463         if (udp_tunnel == NULL) {
2464                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2465                 return -EINVAL;
2466         }
2467
2468         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2469                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
2470                 return -EINVAL;
2471         }
2472
2473         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
2474         return (*dev->dev_ops->udp_tunnel_port_del)(dev, udp_tunnel);
2475 }
2476
2477 int
2478 rte_eth_led_on(uint16_t port_id)
2479 {
2480         struct rte_eth_dev *dev;
2481
2482         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2483         dev = &rte_eth_devices[port_id];
2484         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
2485         return (*dev->dev_ops->dev_led_on)(dev);
2486 }
2487
2488 int
2489 rte_eth_led_off(uint16_t port_id)
2490 {
2491         struct rte_eth_dev *dev;
2492
2493         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2494         dev = &rte_eth_devices[port_id];
2495         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
2496         return (*dev->dev_ops->dev_led_off)(dev);
2497 }
2498
2499 /*
2500  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2501  * an empty spot.
2502  */
2503 static int
2504 get_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
2505 {
2506         struct rte_eth_dev_info dev_info;
2507         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2508         unsigned i;
2509
2510         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2511         rte_eth_dev_info_get(port_id, &dev_info);
2512
2513         for (i = 0; i < dev_info.max_mac_addrs; i++)
2514                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
2515                         return i;
2516
2517         return -1;
2518 }
2519
2520 static const struct ether_addr null_mac_addr;
2521
2522 int
2523 rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *addr,
2524                         uint32_t pool)
2525 {
2526         struct rte_eth_dev *dev;
2527         int index;
2528         uint64_t pool_mask;
2529         int ret;
2530
2531         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2532         dev = &rte_eth_devices[port_id];
2533         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
2534
2535         if (is_zero_ether_addr(addr)) {
2536                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2537                         port_id);
2538                 return -EINVAL;
2539         }
2540         if (pool >= ETH_64_POOLS) {
2541                 RTE_PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
2542                 return -EINVAL;
2543         }
2544
2545         index = get_mac_addr_index(port_id, addr);
2546         if (index < 0) {
2547                 index = get_mac_addr_index(port_id, &null_mac_addr);
2548                 if (index < 0) {
2549                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2550                                 port_id);
2551                         return -ENOSPC;
2552                 }
2553         } else {
2554                 pool_mask = dev->data->mac_pool_sel[index];
2555
2556                 /* If both the MAC address and the pool are already there, do nothing */
2557                 if (pool_mask & (1ULL << pool))
2558                         return 0;
2559         }
2560
2561         /* Update NIC */
2562         ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
2563
2564         if (ret == 0) {
2565                 /* Update address in NIC data structure */
2566                 ether_addr_copy(addr, &dev->data->mac_addrs[index]);
2567
2568                 /* Update pool bitmap in NIC data structure */
2569                 dev->data->mac_pool_sel[index] |= (1ULL << pool);
2570         }
2571
2572         return ret;
2573 }
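
/*
 * Usage sketch (illustrative only; port_id and the locally administered
 * address are hypothetical): accept traffic for one extra unicast address
 * on pool 0.  The address must be non-zero and the pool below ETH_64_POOLS.
 *
 *	struct ether_addr extra = {
 *		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 *	};
 *
 *	if (rte_eth_dev_mac_addr_add(port_id, &extra, 0) != 0)
 *		printf("cannot add extra MAC on port %u\n", port_id);
 */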
2574
2575 int
2576 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *addr)
2577 {
2578         struct rte_eth_dev *dev;
2579         int index;
2580
2581         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2582         dev = &rte_eth_devices[port_id];
2583         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
2584
2585         index = get_mac_addr_index(port_id, addr);
2586         if (index == 0) {
2587                 RTE_PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
2588                 return -EADDRINUSE;
2589         } else if (index < 0)
2590                 return 0;  /* Do nothing if address wasn't found */
2591
2592         /* Update NIC */
2593         (*dev->dev_ops->mac_addr_remove)(dev, index);
2594
2595         /* Update address in NIC data structure */
2596         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
2597
2598         /* reset pool bitmap */
2599         dev->data->mac_pool_sel[index] = 0;
2600
2601         return 0;
2602 }
2603
2604 int
2605 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct ether_addr *addr)
2606 {
2607         struct rte_eth_dev *dev;
2608
2609         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2610
2611         if (!is_valid_assigned_ether_addr(addr))
2612                 return -EINVAL;
2613
2614         dev = &rte_eth_devices[port_id];
2615         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
2616
2617         /* Update default address in NIC data structure */
2618         ether_addr_copy(addr, &dev->data->mac_addrs[0]);
2619
2620         (*dev->dev_ops->mac_addr_set)(dev, addr);
2621
2622         return 0;
2623 }
2624
2625
2626 /*
2627  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2628  * an empty spot.
2629  */
2630 static int
2631 get_hash_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
2632 {
2633         struct rte_eth_dev_info dev_info;
2634         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2635         unsigned i;
2636
2637         rte_eth_dev_info_get(port_id, &dev_info);
2638         if (!dev->data->hash_mac_addrs)
2639                 return -1;
2640
2641         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
2642                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
2643                         ETHER_ADDR_LEN) == 0)
2644                         return i;
2645
2646         return -1;
2647 }
2648
2649 int
2650 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr,
2651                                 uint8_t on)
2652 {
2653         int index;
2654         int ret;
2655         struct rte_eth_dev *dev;
2656
2657         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2658
2659         dev = &rte_eth_devices[port_id];
2660         if (is_zero_ether_addr(addr)) {
2661                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2662                         port_id);
2663                 return -EINVAL;
2664         }
2665
2666         index = get_hash_mac_addr_index(port_id, addr);
2667         /* Check if it's already there, and do nothing */
2668         if ((index >= 0) && on)
2669                 return 0;
2670
2671         if (index < 0) {
2672                 if (!on) {
2673                         RTE_PMD_DEBUG_TRACE("port %d: the MAC address was not "
2674                                 "set in UTA\n", port_id);
2675                         return -EINVAL;
2676                 }
2677
2678                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
2679                 if (index < 0) {
2680                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2681                                         port_id);
2682                         return -ENOSPC;
2683                 }
2684         }
2685
2686         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
2687         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
2688         if (ret == 0) {
2689                 /* Update address in NIC data structure */
2690                 if (on)
2691                         ether_addr_copy(addr,
2692                                         &dev->data->hash_mac_addrs[index]);
2693                 else
2694                         ether_addr_copy(&null_mac_addr,
2695                                         &dev->data->hash_mac_addrs[index]);
2696         }
2697
2698         return ret;
2699 }
2700
2701 int
2702 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
2703 {
2704         struct rte_eth_dev *dev;
2705
2706         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2707
2708         dev = &rte_eth_devices[port_id];
2709
2710         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
2711         return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
2712 }
2713
2714 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
2715                                         uint16_t tx_rate)
2716 {
2717         struct rte_eth_dev *dev;
2718         struct rte_eth_dev_info dev_info;
2719         struct rte_eth_link link;
2720
2721         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2722
2723         dev = &rte_eth_devices[port_id];
2724         rte_eth_dev_info_get(port_id, &dev_info);
2725         link = dev->data->dev_link;
2726
2727         if (queue_idx > dev_info.max_tx_queues) {
2728                 RTE_PMD_DEBUG_TRACE("set queue rate limit: port %d: "
2729                                 "invalid queue id=%d\n", port_id, queue_idx);
2730                 return -EINVAL;
2731         }
2732
2733         if (tx_rate > link.link_speed) {
2734                 RTE_PMD_DEBUG_TRACE("set queue rate limit: invalid tx_rate=%d, "
2735                                 "bigger than link speed=%d\n",
2736                                 tx_rate, link.link_speed);
2737                 return -EINVAL;
2738         }
2739
2740         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
2741         return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate);
2742 }
2743
2744 int
2745 rte_eth_mirror_rule_set(uint16_t port_id,
2746                         struct rte_eth_mirror_conf *mirror_conf,
2747                         uint8_t rule_id, uint8_t on)
2748 {
2749         struct rte_eth_dev *dev;
2750
2751         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2752         if (mirror_conf->rule_type == 0) {
2753                 RTE_PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
2754                 return -EINVAL;
2755         }
2756
2757         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
2758                 RTE_PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
2759                                 ETH_64_POOLS - 1);
2760                 return -EINVAL;
2761         }
2762
2763         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
2764              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
2765             (mirror_conf->pool_mask == 0)) {
2766                 RTE_PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not be 0.\n");
2767                 return -EINVAL;
2768         }
2769
2770         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
2771             mirror_conf->vlan.vlan_mask == 0) {
2772                 RTE_PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask can not be 0.\n");
2773                 return -EINVAL;
2774         }
2775
2776         dev = &rte_eth_devices[port_id];
2777         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
2778
2779         return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
2780 }
2781
2782 int
2783 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
2784 {
2785         struct rte_eth_dev *dev;
2786
2787         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2788
2789         dev = &rte_eth_devices[port_id];
2790         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
2791
2792         return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
2793 }
2794
2795 int
2796 rte_eth_dev_callback_register(uint16_t port_id,
2797                         enum rte_eth_event_type event,
2798                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2799 {
2800         struct rte_eth_dev *dev;
2801         struct rte_eth_dev_callback *user_cb;
2802
2803         if (!cb_fn)
2804                 return -EINVAL;
2805
2806         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2807
2808         dev = &rte_eth_devices[port_id];
2809         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2810
2811         TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
2812                 if (user_cb->cb_fn == cb_fn &&
2813                         user_cb->cb_arg == cb_arg &&
2814                         user_cb->event == event) {
2815                         break;
2816                 }
2817         }
2818
2819         /* create a new callback. */
2820         if (user_cb == NULL) {
2821                 user_cb = rte_zmalloc("INTR_USER_CALLBACK",
2822                                         sizeof(struct rte_eth_dev_callback), 0);
2823                 if (user_cb != NULL) {
2824                         user_cb->cb_fn = cb_fn;
2825                         user_cb->cb_arg = cb_arg;
2826                         user_cb->event = event;
2827                         TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
2828                 }
2829         }
2830
2831         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2832         return (user_cb == NULL) ? -ENOMEM : 0;
2833 }
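
/*
 * Usage sketch (illustrative only; lsc_handler is a hypothetical
 * application function matching rte_eth_dev_cb_fn): register a callback
 * for link-status-change events.
 *
 *	static int
 *	lsc_handler(uint16_t port, enum rte_eth_event_type type, void *arg,
 *		    void *ret_param)
 *	{
 *		RTE_SET_USED(arg);
 *		RTE_SET_USED(ret_param);
 *		printf("port %u: event %d\n", port, type);
 *		return 0;
 *	}
 *
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *				      lsc_handler, NULL);
 */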
2834
2835 int
2836 rte_eth_dev_callback_unregister(uint16_t port_id,
2837                         enum rte_eth_event_type event,
2838                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2839 {
2840         int ret;
2841         struct rte_eth_dev *dev;
2842         struct rte_eth_dev_callback *cb, *next;
2843
2844         if (!cb_fn)
2845                 return -EINVAL;
2846
2847         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2848
2849         dev = &rte_eth_devices[port_id];
2850         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2851
2852         ret = 0;
2853         for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {
2854
2855                 next = TAILQ_NEXT(cb, next);
2856
2857                 if (cb->cb_fn != cb_fn || cb->event != event ||
2858                                 (cb->cb_arg != (void *)-1 &&
2859                                 cb->cb_arg != cb_arg))
2860                         continue;
2861
2862                 /*
2863                  * if this callback is not executing right now,
2864                  * then remove it.
2865                  */
2866                 if (cb->active == 0) {
2867                         TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
2868                         rte_free(cb);
2869                 } else {
2870                         ret = -EAGAIN;
2871                 }
2872         }
2873
2874         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2875         return ret;
2876 }
2877
2878 int
2879 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
2880         enum rte_eth_event_type event, void *cb_arg, void *ret_param)
2881 {
2882         struct rte_eth_dev_callback *cb_lst;
2883         struct rte_eth_dev_callback dev_cb;
2884         int rc = 0;
2885
2886         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2887         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
2888                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
2889                         continue;
2890                 dev_cb = *cb_lst;
2891                 cb_lst->active = 1;
2892                 if (cb_arg != NULL)
2893                         dev_cb.cb_arg = cb_arg;
2894                 if (ret_param != NULL)
2895                         dev_cb.ret_param = ret_param;
2896
2897                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2898                 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
2899                                 dev_cb.cb_arg, dev_cb.ret_param);
2900                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
2901                 cb_lst->active = 0;
2902         }
2903         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2904         return rc;
2905 }
2906
2907 int
2908 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
2909 {
2910         uint32_t vec;
2911         struct rte_eth_dev *dev;
2912         struct rte_intr_handle *intr_handle;
2913         uint16_t qid;
2914         int rc;
2915
2916         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2917
2918         dev = &rte_eth_devices[port_id];
2919
2920         if (!dev->intr_handle) {
2921                 RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
2922                 return -ENOTSUP;
2923         }
2924
2925         intr_handle = dev->intr_handle;
2926         if (!intr_handle->intr_vec) {
2927                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
2928                 return -EPERM;
2929         }
2930
2931         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
2932                 vec = intr_handle->intr_vec[qid];
2933                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
2934                 if (rc && rc != -EEXIST) {
2935                         RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
2936                                         " op %d epfd %d vec %u\n",
2937                                         port_id, qid, op, epfd, vec);
2938                 }
2939         }
2940
2941         return 0;
2942 }
2943
2944 const struct rte_memzone *
2945 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
2946                          uint16_t queue_id, size_t size, unsigned align,
2947                          int socket_id)
2948 {
2949         char z_name[RTE_MEMZONE_NAMESIZE];
2950         const struct rte_memzone *mz;
2951
2952         snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
2953                  dev->device->driver->name, ring_name,
2954                  dev->data->port_id, queue_id);
2955
2956         mz = rte_memzone_lookup(z_name);
2957         if (mz)
2958                 return mz;
2959
2960         return rte_memzone_reserve_aligned(z_name, size, socket_id, 0, align);
2961 }
2962
2963 int
2964 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
2965                           int epfd, int op, void *data)
2966 {
2967         uint32_t vec;
2968         struct rte_eth_dev *dev;
2969         struct rte_intr_handle *intr_handle;
2970         int rc;
2971
2972         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2973
2974         dev = &rte_eth_devices[port_id];
2975         if (queue_id >= dev->data->nb_rx_queues) {
2976                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
2977                 return -EINVAL;
2978         }
2979
2980         if (!dev->intr_handle) {
2981                 RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
2982                 return -ENOTSUP;
2983         }
2984
2985         intr_handle = dev->intr_handle;
2986         if (!intr_handle->intr_vec) {
2987                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
2988                 return -EPERM;
2989         }
2990
2991         vec = intr_handle->intr_vec[queue_id];
2992         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
2993         if (rc && rc != -EEXIST) {
2994                 RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
2995                                 " op %d epfd %d vec %u\n",
2996                                 port_id, queue_id, op, epfd, vec);
2997                 return rc;
2998         }
2999
3000         return 0;
3001 }
3002
3003 int
3004 rte_eth_dev_rx_intr_enable(uint16_t port_id,
3005                            uint16_t queue_id)
3006 {
3007         struct rte_eth_dev *dev;
3008
3009         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3010
3011         dev = &rte_eth_devices[port_id];
3012
3013         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
3014         return (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id);
3015 }
3016
3017 int
3018 rte_eth_dev_rx_intr_disable(uint16_t port_id,
3019                             uint16_t queue_id)
3020 {
3021         struct rte_eth_dev *dev;
3022
3023         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3024
3025         dev = &rte_eth_devices[port_id];
3026
3027         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
3028         return (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id);
3029 }
3030
3031
3032 int
3033 rte_eth_dev_filter_supported(uint16_t port_id,
3034                              enum rte_filter_type filter_type)
3035 {
3036         struct rte_eth_dev *dev;
3037
3038         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3039
3040         dev = &rte_eth_devices[port_id];
3041         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3042         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3043                                 RTE_ETH_FILTER_NOP, NULL);
3044 }
3045
3046 int
3047 rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
3048                        enum rte_filter_op filter_op, void *arg)
3049 {
3050         struct rte_eth_dev *dev;
3051
3052         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3053
3054         dev = &rte_eth_devices[port_id];
3055         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3056         return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op, arg);
3057 }
3058
3059 void *
3060 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
3061                 rte_rx_callback_fn fn, void *user_param)
3062 {
3063 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3064         rte_errno = ENOTSUP;
3065         return NULL;
3066 #endif
3067         /* check input parameters */
3068         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3069                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3070                 rte_errno = EINVAL;
3071                 return NULL;
3072         }
3073         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3074
3075         if (cb == NULL) {
3076                 rte_errno = ENOMEM;
3077                 return NULL;
3078         }
3079
3080         cb->fn.rx = fn;
3081         cb->param = user_param;
3082
3083         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3084         /* Add the callbacks in fifo order. */
3085         struct rte_eth_rxtx_callback *tail =
3086                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3087
3088         if (!tail) {
3089                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3090
3091         } else {
3092                 while (tail->next)
3093                         tail = tail->next;
3094                 tail->next = cb;
3095         }
3096         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3097
3098         return cb;
3099 }
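
/*
 * A minimal usage sketch (the callback and counter names below are
 * illustrative only): count the packets delivered on queue 0 of a port.
 *
 *     static uint16_t
 *     count_cb(uint16_t port __rte_unused, uint16_t queue __rte_unused,
 *              struct rte_mbuf **pkts __rte_unused, uint16_t nb_pkts,
 *              uint16_t max_pkts __rte_unused, void *counter)
 *     {
 *             *(uint64_t *)counter += nb_pkts;
 *             return nb_pkts;
 *     }
 *
 *     static uint64_t rx_count;
 *     ...
 *     if (rte_eth_add_rx_callback(port_id, 0, count_cb, &rx_count) == NULL)
 *             printf("callback registration failed: %d\n", rte_errno);
 */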
3100
3101 void *
3102 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
3103                 rte_rx_callback_fn fn, void *user_param)
3104 {
3105 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3106         rte_errno = ENOTSUP;
3107         return NULL;
3108 #endif
3109         /* check input parameters */
3110         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3111                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3112                 rte_errno = EINVAL;
3113                 return NULL;
3114         }
3115
3116         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3117
3118         if (cb == NULL) {
3119                 rte_errno = ENOMEM;
3120                 return NULL;
3121         }
3122
3123         cb->fn.rx = fn;
3124         cb->param = user_param;
3125
3126         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3127         /* Add the callback at the first position. */
3128         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
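        /*
         * Ensure the store to cb->next is visible before the new callback
         * is published as the list head; RX burst threads may traverse
         * this list without taking the lock.
         */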
3129         rte_smp_wmb();
3130         rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3131         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3132
3133         return cb;
3134 }
3135
3136 void *
3137 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
3138                 rte_tx_callback_fn fn, void *user_param)
3139 {
3140 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3141         rte_errno = ENOTSUP;
3142         return NULL;
3143 #endif
3144         /* check input parameters */
3145         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3146                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
3147                 rte_errno = EINVAL;
3148                 return NULL;
3149         }
3150
3151         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3152
3153         if (cb == NULL) {
3154                 rte_errno = ENOMEM;
3155                 return NULL;
3156         }
3157
3158         cb->fn.tx = fn;
3159         cb->param = user_param;
3160
3161         rte_spinlock_lock(&rte_eth_tx_cb_lock);
3162         /* Add the callbacks in fifo order. */
3163         struct rte_eth_rxtx_callback *tail =
3164                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
3165
3166         if (!tail) {
3167                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
3168
3169         } else {
3170                 while (tail->next)
3171                         tail = tail->next;
3172                 tail->next = cb;
3173         }
3174         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3175
3176         return cb;
3177 }
3178
3179 int
3180 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
3181                 struct rte_eth_rxtx_callback *user_cb)
3182 {
3183 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3184         return -ENOTSUP;
3185 #endif
3186         /* Check input parameters. */
3187         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3188         if (user_cb == NULL ||
3189                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
3190                 return -EINVAL;
3191
3192         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3193         struct rte_eth_rxtx_callback *cb;
3194         struct rte_eth_rxtx_callback **prev_cb;
3195         int ret = -EINVAL;
3196
3197         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3198         prev_cb = &dev->post_rx_burst_cbs[queue_id];
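        /*
         * Walk the list through a pointer-to-pointer so that a matching
         * entry can be unlinked without special-casing the head node.
         */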
3199         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3200                 cb = *prev_cb;
3201                 if (cb == user_cb) {
3202                         /* Remove the user cb from the callback list. */
3203                         *prev_cb = cb->next;
3204                         ret = 0;
3205                         break;
3206                 }
3207         }
3208         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3209
3210         return ret;
3211 }
3212
3213 int
3214 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
3215                 struct rte_eth_rxtx_callback *user_cb)
3216 {
3217 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3218         return -ENOTSUP;
3219 #endif
3220         /* Check input parameters. */
3221         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3222         if (user_cb == NULL ||
3223                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
3224                 return -EINVAL;
3225
3226         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3227         int ret = -EINVAL;
3228         struct rte_eth_rxtx_callback *cb;
3229         struct rte_eth_rxtx_callback **prev_cb;
3230
3231         rte_spinlock_lock(&rte_eth_tx_cb_lock);
3232         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
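        /*
         * Walk the list through a pointer-to-pointer so that a matching
         * entry can be unlinked without special-casing the head node.
         */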
3233         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3234                 cb = *prev_cb;
3235                 if (cb == user_cb) {
3236                         /* Remove the user cb from the callback list. */
3237                         *prev_cb = cb->next;
3238                         ret = 0;
3239                         break;
3240                 }
3241         }
3242         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3243
3244         return ret;
3245 }
3246
3247 int
3248 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3249         struct rte_eth_rxq_info *qinfo)
3250 {
3251         struct rte_eth_dev *dev;
3252
3253         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3254
3255         if (qinfo == NULL)
3256                 return -EINVAL;
3257
3258         dev = &rte_eth_devices[port_id];
3259         if (queue_id >= dev->data->nb_rx_queues) {
3260                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
3261                 return -EINVAL;
3262         }
3263
3264         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
3265
3266         memset(qinfo, 0, sizeof(*qinfo));
3267         (*dev->dev_ops->rxq_info_get)(dev, queue_id, qinfo);
3268         return 0;
3269 }
3270
3271 int
3272 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3273         struct rte_eth_txq_info *qinfo)
3274 {
3275         struct rte_eth_dev *dev;
3276
3277         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3278
3279         if (qinfo == NULL)
3280                 return -EINVAL;
3281
3282         dev = &rte_eth_devices[port_id];
3283         if (queue_id >= dev->data->nb_tx_queues) {
3284                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
3285                 return -EINVAL;
3286         }
3287
3288         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
3289
3290         memset(qinfo, 0, sizeof(*qinfo));
3291         (*dev->dev_ops->txq_info_get)(dev, queue_id, qinfo);
3292         return 0;
3293 }
3294
3295 int
3296 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
3297                              struct ether_addr *mc_addr_set,
3298                              uint32_t nb_mc_addr)
3299 {
3300         struct rte_eth_dev *dev;
3301
3302         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3303
3304         dev = &rte_eth_devices[port_id];
3305         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
3306         return dev->dev_ops->set_mc_addr_list(dev, mc_addr_set, nb_mc_addr);
3307 }
3308
3309 int
3310 rte_eth_timesync_enable(uint16_t port_id)
3311 {
3312         struct rte_eth_dev *dev;
3313
3314         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3315         dev = &rte_eth_devices[port_id];
3316
3317         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
3318         return (*dev->dev_ops->timesync_enable)(dev);
3319 }
3320
3321 int
3322 rte_eth_timesync_disable(uint16_t port_id)
3323 {
3324         struct rte_eth_dev *dev;
3325
3326         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3327         dev = &rte_eth_devices[port_id];
3328
3329         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
3330         return (*dev->dev_ops->timesync_disable)(dev);
3331 }
3332
3333 int
3334 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
3335                                    uint32_t flags)
3336 {
3337         struct rte_eth_dev *dev;
3338
3339         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3340         dev = &rte_eth_devices[port_id];
3341
3342         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
3343         return (*dev->dev_ops->timesync_read_rx_timestamp)(dev, timestamp, flags);
3344 }
3345
3346 int
3347 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
3348                                    struct timespec *timestamp)
3349 {
3350         struct rte_eth_dev *dev;
3351
3352         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3353         dev = &rte_eth_devices[port_id];
3354
3355         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
3356         return (*dev->dev_ops->timesync_read_tx_timestamp)(dev, timestamp);
3357 }
3358
3359 int
3360 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
3361 {
3362         struct rte_eth_dev *dev;
3363
3364         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3365         dev = &rte_eth_devices[port_id];
3366
3367         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
3368         return (*dev->dev_ops->timesync_adjust_time)(dev, delta);
3369 }
3370
3371 int
3372 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
3373 {
3374         struct rte_eth_dev *dev;
3375
3376         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3377         dev = &rte_eth_devices[port_id];
3378
3379         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
3380         return (*dev->dev_ops->timesync_read_time)(dev, timestamp);
3381 }
3382
3383 int
3384 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
3385 {
3386         struct rte_eth_dev *dev;
3387
3388         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3389         dev = &rte_eth_devices[port_id];
3390
3391         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
3392         return (*dev->dev_ops->timesync_write_time)(dev, timestamp);
3393 }
3394
3395 int
3396 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
3397 {
3398         struct rte_eth_dev *dev;
3399
3400         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3401
3402         dev = &rte_eth_devices[port_id];
3403         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
3404         return (*dev->dev_ops->get_reg)(dev, info);
3405 }
3406
3407 int
3408 rte_eth_dev_get_eeprom_length(uint16_t port_id)
3409 {
3410         struct rte_eth_dev *dev;
3411
3412         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3413
3414         dev = &rte_eth_devices[port_id];
3415         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
3416         return (*dev->dev_ops->get_eeprom_length)(dev);
3417 }
3418
3419 int
3420 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
3421 {
3422         struct rte_eth_dev *dev;
3423
3424         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3425
3426         dev = &rte_eth_devices[port_id];
3427         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
3428         return (*dev->dev_ops->get_eeprom)(dev, info);
3429 }
3430
3431 int
3432 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
3433 {
3434         struct rte_eth_dev *dev;
3435
3436         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3437
3438         dev = &rte_eth_devices[port_id];
3439         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
3440         return (*dev->dev_ops->set_eeprom)(dev, info);
3441 }
3442
3443 int
3444 rte_eth_dev_get_dcb_info(uint16_t port_id,
3445                              struct rte_eth_dcb_info *dcb_info)
3446 {
3447         struct rte_eth_dev *dev;
3448
3449         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3450
3451         dev = &rte_eth_devices[port_id];
3452         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
3453
3454         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
3455         return (*dev->dev_ops->get_dcb_info)(dev, dcb_info);
3456 }
3457
3458 int
3459 rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
3460                                     struct rte_eth_l2_tunnel_conf *l2_tunnel)
3461 {
3462         struct rte_eth_dev *dev;
3463
3464         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3465         if (l2_tunnel == NULL) {
3466                 RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
3467                 return -EINVAL;
3468         }
3469
3470         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
3471                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
3472                 return -EINVAL;
3473         }
3474
3475         dev = &rte_eth_devices[port_id];
3476         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
3477                                 -ENOTSUP);
3478         return (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev, l2_tunnel);
3479 }
3480
3481 int
3482 rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
3483                                   struct rte_eth_l2_tunnel_conf *l2_tunnel,
3484                                   uint32_t mask,
3485                                   uint8_t en)
3486 {
3487         struct rte_eth_dev *dev;
3488
3489         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3490
3491         if (l2_tunnel == NULL) {
3492                 RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
3493                 return -EINVAL;
3494         }
3495
3496         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
3497                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type.\n");
3498                 return -EINVAL;
3499         }
3500
3501         if (mask == 0) {
3502                 RTE_PMD_DEBUG_TRACE("Mask must be non-zero.\n");
3503                 return -EINVAL;
3504         }
3505
3506         dev = &rte_eth_devices[port_id];
3507         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
3508                                 -ENOTSUP);
3509         return (*dev->dev_ops->l2_tunnel_offload_set)(dev, l2_tunnel, mask, en);
3510 }
3511
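/*
 * Clamp a requested descriptor count to the limits reported by the driver:
 * round it up to the required alignment, cap it at the maximum and raise
 * it to the minimum supported value.
 */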
3512 static void
3513 rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc,
3514                            const struct rte_eth_desc_lim *desc_lim)
3515 {
3516         if (desc_lim->nb_align != 0)
3517                 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
3518
3519         if (desc_lim->nb_max != 0)
3520                 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
3521
3522         *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
3523 }
3524
3525 int
3526 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
3527                                  uint16_t *nb_rx_desc,
3528                                  uint16_t *nb_tx_desc)
3529 {
3530         struct rte_eth_dev *dev;
3531         struct rte_eth_dev_info dev_info;
3532
3533         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3534
3535         dev = &rte_eth_devices[port_id];
3536         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
3537
3538         rte_eth_dev_info_get(port_id, &dev_info);
3539
3540         if (nb_rx_desc != NULL)
3541                 rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
3542
3543         if (nb_tx_desc != NULL)
3544                 rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
3545
3546         return 0;
3547 }
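
/*
 * Typical use (a sketch; nb_rxd/nb_txd are arbitrary starting values and
 * socket_id/mb_pool are assumed to be provided by the caller): adjust the
 * requested ring sizes before setting up the queues.
 *
 *     uint16_t nb_rxd = 1024, nb_txd = 1024;
 *
 *     if (rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd) == 0) {
 *             rte_eth_rx_queue_setup(port_id, 0, nb_rxd, socket_id,
 *                                    NULL, mb_pool);
 *             rte_eth_tx_queue_setup(port_id, 0, nb_txd, socket_id, NULL);
 *     }
 */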
3548
3549 int
3550 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
3551 {
3552         struct rte_eth_dev *dev;
3553
3554         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3555
3556         if (pool == NULL)
3557                 return -EINVAL;
3558
3559         dev = &rte_eth_devices[port_id];
3560
3561         if (*dev->dev_ops->pool_ops_supported == NULL)
3562                 return 1; /* all pools are supported */
3563
3564         return (*dev->dev_ops->pool_ops_supported)(dev, pool);
3565 }