ethdev: optimize xstats by ids APIs
[dpdk.git] / lib / librte_ether / rte_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4
5 #include <sys/types.h>
6 #include <sys/queue.h>
7 #include <ctype.h>
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <string.h>
11 #include <stdarg.h>
12 #include <errno.h>
13 #include <stdint.h>
14 #include <inttypes.h>
15 #include <netinet/in.h>
16
17 #include <rte_byteorder.h>
18 #include <rte_log.h>
19 #include <rte_debug.h>
20 #include <rte_interrupts.h>
21 #include <rte_memory.h>
22 #include <rte_memcpy.h>
23 #include <rte_memzone.h>
24 #include <rte_launch.h>
25 #include <rte_eal.h>
26 #include <rte_per_lcore.h>
27 #include <rte_lcore.h>
28 #include <rte_atomic.h>
29 #include <rte_branch_prediction.h>
30 #include <rte_common.h>
31 #include <rte_mempool.h>
32 #include <rte_malloc.h>
33 #include <rte_mbuf.h>
34 #include <rte_errno.h>
35 #include <rte_spinlock.h>
36 #include <rte_string_fns.h>
37
38 #include "rte_ether.h"
39 #include "rte_ethdev.h"
40 #include "ethdev_profile.h"
41
42 static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
43 struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
44 static struct rte_eth_dev_data *rte_eth_dev_data;
45 static uint8_t eth_dev_last_created_port;
46
47 /* spinlock for eth device callbacks */
48 static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
49
50 /* spinlock for add/remove rx callbacks */
51 static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;
52
53 /* spinlock for add/remove tx callbacks */
54 static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;
55
56 /* store statistics names and their offsets in the stats structure */
57 struct rte_eth_xstats_name_off {
58         char name[RTE_ETH_XSTATS_NAME_SIZE];
59         unsigned offset;
60 };
61
62 static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
63         {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
64         {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
65         {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
66         {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
67         {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
68         {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
69         {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
70         {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
71                 rx_nombuf)},
72 };
73
74 #define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))
75
76 static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
77         {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
78         {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
79         {"errors", offsetof(struct rte_eth_stats, q_errors)},
80 };
81
82 #define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
83                 sizeof(rte_rxq_stats_strings[0]))
84
85 static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
86         {"packets", offsetof(struct rte_eth_stats, q_opackets)},
87         {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
88 };
89 #define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
90                 sizeof(rte_txq_stats_strings[0]))
91
92
93 /**
94  * The user application callback description.
95  *
96  * It contains the callback address registered by the user application,
97  * a pointer to the callback parameters, and the event type.
98  */
99 struct rte_eth_dev_callback {
100         TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
101         rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
102         void *cb_arg;                           /**< Parameter for callback */
103         void *ret_param;                        /**< Return parameter */
104         enum rte_eth_event_type event;          /**< Interrupt event type */
105         uint32_t active;                        /**< Callback is executing */
106 };
107
108 enum {
109         STAT_QMAP_TX = 0,
110         STAT_QMAP_RX
111 };
112
113 uint16_t
114 rte_eth_find_next(uint16_t port_id)
115 {
116         while (port_id < RTE_MAX_ETHPORTS &&
117                rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED)
118                 port_id++;
119
120         if (port_id >= RTE_MAX_ETHPORTS)
121                 return RTE_MAX_ETHPORTS;
122
123         return port_id;
124 }
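/*
 * Editor's usage sketch (illustrative only, not part of this file): an
 * application normally iterates the attached ports through the
 * RTE_ETH_FOREACH_DEV() macro, which is built on rte_eth_find_next().
 *
 *	uint16_t pid;
 *
 *	RTE_ETH_FOREACH_DEV(pid)
 *		printf("port %u is attached\n", pid);
 */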
125
126 static void
127 rte_eth_dev_data_alloc(void)
128 {
129         const unsigned flags = 0;
130         const struct rte_memzone *mz;
131
132         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
133                 mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
134                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
135                                 rte_socket_id(), flags);
136         } else
137                 mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
138         if (mz == NULL)
139                 rte_panic("Cannot allocate memzone for ethernet port data\n");
140
141         rte_eth_dev_data = mz->addr;
142         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
143                 memset(rte_eth_dev_data, 0,
144                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
145 }
146
147 struct rte_eth_dev *
148 rte_eth_dev_allocated(const char *name)
149 {
150         unsigned i;
151
152         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
153                 if ((rte_eth_devices[i].state == RTE_ETH_DEV_ATTACHED) &&
154                     strcmp(rte_eth_devices[i].data->name, name) == 0)
155                         return &rte_eth_devices[i];
156         }
157         return NULL;
158 }
159
160 static uint16_t
161 rte_eth_dev_find_free_port(void)
162 {
163         unsigned i;
164
165         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
166                 if (rte_eth_devices[i].state == RTE_ETH_DEV_UNUSED)
167                         return i;
168         }
169         return RTE_MAX_ETHPORTS;
170 }
171
172 static struct rte_eth_dev *
173 eth_dev_get(uint16_t port_id)
174 {
175         struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
176
177         eth_dev->data = &rte_eth_dev_data[port_id];
178         eth_dev->state = RTE_ETH_DEV_ATTACHED;
179         TAILQ_INIT(&(eth_dev->link_intr_cbs));
180
181         eth_dev_last_created_port = port_id;
182
183         return eth_dev;
184 }
185
186 struct rte_eth_dev *
187 rte_eth_dev_allocate(const char *name)
188 {
189         uint16_t port_id;
190         struct rte_eth_dev *eth_dev;
191
192         port_id = rte_eth_dev_find_free_port();
193         if (port_id == RTE_MAX_ETHPORTS) {
194                 RTE_PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
195                 return NULL;
196         }
197
198         if (rte_eth_dev_data == NULL)
199                 rte_eth_dev_data_alloc();
200
201         if (rte_eth_dev_allocated(name) != NULL) {
202                 RTE_PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n",
203                                 name);
204                 return NULL;
205         }
206
207         memset(&rte_eth_dev_data[port_id], 0, sizeof(struct rte_eth_dev_data));
208         eth_dev = eth_dev_get(port_id);
209         snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
210         eth_dev->data->port_id = port_id;
211         eth_dev->data->mtu = ETHER_MTU;
212
213         return eth_dev;
214 }
215
216 /*
217  * Attach to a port already registered by the primary process, which
218  * ensures that the same device gets the same port id in both the
219  * primary and secondary processes.
220  */
221 struct rte_eth_dev *
222 rte_eth_dev_attach_secondary(const char *name)
223 {
224         uint16_t i;
225         struct rte_eth_dev *eth_dev;
226
227         if (rte_eth_dev_data == NULL)
228                 rte_eth_dev_data_alloc();
229
230         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
231                 if (strcmp(rte_eth_dev_data[i].name, name) == 0)
232                         break;
233         }
234         if (i == RTE_MAX_ETHPORTS) {
235                 RTE_PMD_DEBUG_TRACE(
236                         "device %s is not driven by the primary process\n",
237                         name);
238                 return NULL;
239         }
240
241         eth_dev = eth_dev_get(i);
242         RTE_ASSERT(eth_dev->data->port_id == i);
243
244         return eth_dev;
245 }
246
247 int
248 rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
249 {
250         if (eth_dev == NULL)
251                 return -EINVAL;
252
253         eth_dev->state = RTE_ETH_DEV_UNUSED;
254         return 0;
255 }
256
257 int
258 rte_eth_dev_is_valid_port(uint16_t port_id)
259 {
260         if (port_id >= RTE_MAX_ETHPORTS ||
261             (rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
262              rte_eth_devices[port_id].state != RTE_ETH_DEV_DEFERRED))
263                 return 0;
264         else
265                 return 1;
266 }
267
268 int
269 rte_eth_dev_socket_id(uint16_t port_id)
270 {
271         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
272         return rte_eth_devices[port_id].data->numa_node;
273 }
274
275 void *
276 rte_eth_dev_get_sec_ctx(uint8_t port_id)
277 {
278         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
279         return rte_eth_devices[port_id].security_ctx;
280 }
281
282 uint16_t
283 rte_eth_dev_count(void)
284 {
285         uint16_t p;
286         uint16_t count;
287
288         count = 0;
289
290         RTE_ETH_FOREACH_DEV(p)
291                 count++;
292
293         return count;
294 }
295
296 int
297 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
298 {
299         char *tmp;
300
301         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
302
303         if (name == NULL) {
304                 RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
305                 return -EINVAL;
306         }
307
308         /* shouldn't check 'rte_eth_devices[i].data',
309          * because it might be overwritten by VDEV PMD */
310         tmp = rte_eth_dev_data[port_id].name;
311         strcpy(name, tmp);
312         return 0;
313 }
314
315 int
316 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
317 {
318         int i;
319
320         if (name == NULL) {
321                 RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
322                 return -EINVAL;
323         }
324
325         RTE_ETH_FOREACH_DEV(i) {
326                 if (strcmp(name,
327                         rte_eth_dev_data[i].name) == 0) {
328
329                         *port_id = i;
330
331                         return 0;
332                 }
333         }
334         return -ENODEV;
335 }
336
337 /* attach the new device, then store port_id of the device */
338 int
339 rte_eth_dev_attach(const char *devargs, uint16_t *port_id)
340 {
341         int ret = -1;
342         int current = rte_eth_dev_count();
343         char *name = NULL;
344         char *args = NULL;
345
346         if ((devargs == NULL) || (port_id == NULL)) {
347                 ret = -EINVAL;
348                 goto err;
349         }
350
351         /* parse devargs, then retrieve device name and args */
352         if (rte_eal_parse_devargs_str(devargs, &name, &args))
353                 goto err;
354
355         ret = rte_eal_dev_attach(name, args);
356         if (ret < 0)
357                 goto err;
358
359         /* no point looking at the port count if no port exists */
360         if (!rte_eth_dev_count()) {
361                 RTE_LOG(ERR, EAL, "No port found for device (%s)\n", name);
362                 ret = -1;
363                 goto err;
364         }
365
366         /* if the port count did not change, there is a bug: the driver
367          * reported a successful attach but did not create a port.
368          */
369         if (current == rte_eth_dev_count()) {
370                 ret = -1;
371                 goto err;
372         }
373
374         *port_id = eth_dev_last_created_port;
375         ret = 0;
376
377 err:
378         free(name);
379         free(args);
380         return ret;
381 }
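/*
 * Editor's usage sketch (illustrative only): hot-plugging a virtual device
 * at runtime. "net_null0" is just an example devargs string; error handling
 * is reduced to the minimum.
 *
 *	uint16_t port;
 *
 *	if (rte_eth_dev_attach("net_null0", &port) == 0)
 *		printf("attached as port %u\n", port);
 */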
382
383 /* detach the device, then store the name of the device */
384 int
385 rte_eth_dev_detach(uint16_t port_id, char *name)
386 {
387         uint32_t dev_flags;
388         int ret = -1;
389
390         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
391
392         if (name == NULL) {
393                 ret = -EINVAL;
394                 goto err;
395         }
396
397         dev_flags = rte_eth_devices[port_id].data->dev_flags;
398         if (dev_flags & RTE_ETH_DEV_BONDED_SLAVE) {
399                 RTE_LOG(ERR, EAL, "Port %" PRIu16 " is bonded, cannot detach\n",
400                         port_id);
401                 ret = -ENOTSUP;
402                 goto err;
403         }
404
405         snprintf(name, sizeof(rte_eth_devices[port_id].data->name),
406                  "%s", rte_eth_devices[port_id].data->name);
407
408         ret = rte_eal_dev_detach(rte_eth_devices[port_id].device);
409         if (ret < 0)
410                 goto err;
411
412         rte_eth_devices[port_id].state = RTE_ETH_DEV_UNUSED;
413         return 0;
414
415 err:
416         return ret;
417 }
418
419 static int
420 rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
421 {
422         uint16_t old_nb_queues = dev->data->nb_rx_queues;
423         void **rxq;
424         unsigned i;
425
426         if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
427                 dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
428                                 sizeof(dev->data->rx_queues[0]) * nb_queues,
429                                 RTE_CACHE_LINE_SIZE);
430                 if (dev->data->rx_queues == NULL) {
431                         dev->data->nb_rx_queues = 0;
432                         return -(ENOMEM);
433                 }
434         } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
435                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
436
437                 rxq = dev->data->rx_queues;
438
439                 for (i = nb_queues; i < old_nb_queues; i++)
440                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
441                 rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
442                                 RTE_CACHE_LINE_SIZE);
443                 if (rxq == NULL)
444                         return -(ENOMEM);
445                 if (nb_queues > old_nb_queues) {
446                         uint16_t new_qs = nb_queues - old_nb_queues;
447
448                         memset(rxq + old_nb_queues, 0,
449                                 sizeof(rxq[0]) * new_qs);
450                 }
451
452                 dev->data->rx_queues = rxq;
453
454         } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
455                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
456
457                 rxq = dev->data->rx_queues;
458
459                 for (i = nb_queues; i < old_nb_queues; i++)
460                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
461
462                 rte_free(dev->data->rx_queues);
463                 dev->data->rx_queues = NULL;
464         }
465         dev->data->nb_rx_queues = nb_queues;
466         return 0;
467 }
468
469 int
470 rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
471 {
472         struct rte_eth_dev *dev;
473
474         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
475
476         dev = &rte_eth_devices[port_id];
477         if (rx_queue_id >= dev->data->nb_rx_queues) {
478                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
479                 return -EINVAL;
480         }
481
482         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
483
484         if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
485                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
486                         " already started\n",
487                         rx_queue_id, port_id);
488                 return 0;
489         }
490
491         return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
492
493 }
494
495 int
496 rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
497 {
498         struct rte_eth_dev *dev;
499
500         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
501
502         dev = &rte_eth_devices[port_id];
503         if (rx_queue_id >= dev->data->nb_rx_queues) {
504                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
505                 return -EINVAL;
506         }
507
508         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
509
510         if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
511                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
512                         " already stopped\n",
513                         rx_queue_id, port_id);
514                 return 0;
515         }
516
517         return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
518
519 }
520
521 int
522 rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
523 {
524         struct rte_eth_dev *dev;
525
526         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
527
528         dev = &rte_eth_devices[port_id];
529         if (tx_queue_id >= dev->data->nb_tx_queues) {
530                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
531                 return -EINVAL;
532         }
533
534         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
535
536         if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
537                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
538                         " already started\n",
539                         tx_queue_id, port_id);
540                 return 0;
541         }
542
543         return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
544
545 }
546
547 int
548 rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
549 {
550         struct rte_eth_dev *dev;
551
552         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
553
554         dev = &rte_eth_devices[port_id];
555         if (tx_queue_id >= dev->data->nb_tx_queues) {
556                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
557                 return -EINVAL;
558         }
559
560         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
561
562         if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
563                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
564                         " already stopped\n",
565                         tx_queue_id, port_id);
566                 return 0;
567         }
568
569         return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
570
571 }
572
573 static int
574 rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
575 {
576         uint16_t old_nb_queues = dev->data->nb_tx_queues;
577         void **txq;
578         unsigned i;
579
580         if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
581                 dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
582                                                    sizeof(dev->data->tx_queues[0]) * nb_queues,
583                                                    RTE_CACHE_LINE_SIZE);
584                 if (dev->data->tx_queues == NULL) {
585                         dev->data->nb_tx_queues = 0;
586                         return -(ENOMEM);
587                 }
588         } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
589                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
590
591                 txq = dev->data->tx_queues;
592
593                 for (i = nb_queues; i < old_nb_queues; i++)
594                         (*dev->dev_ops->tx_queue_release)(txq[i]);
595                 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
596                                   RTE_CACHE_LINE_SIZE);
597                 if (txq == NULL)
598                         return -ENOMEM;
599                 if (nb_queues > old_nb_queues) {
600                         uint16_t new_qs = nb_queues - old_nb_queues;
601
602                         memset(txq + old_nb_queues, 0,
603                                sizeof(txq[0]) * new_qs);
604                 }
605
606                 dev->data->tx_queues = txq;
607
608         } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
609                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
610
611                 txq = dev->data->tx_queues;
612
613                 for (i = nb_queues; i < old_nb_queues; i++)
614                         (*dev->dev_ops->tx_queue_release)(txq[i]);
615
616                 rte_free(dev->data->tx_queues);
617                 dev->data->tx_queues = NULL;
618         }
619         dev->data->nb_tx_queues = nb_queues;
620         return 0;
621 }
622
623 uint32_t
624 rte_eth_speed_bitflag(uint32_t speed, int duplex)
625 {
626         switch (speed) {
627         case ETH_SPEED_NUM_10M:
628                 return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
629         case ETH_SPEED_NUM_100M:
630                 return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
631         case ETH_SPEED_NUM_1G:
632                 return ETH_LINK_SPEED_1G;
633         case ETH_SPEED_NUM_2_5G:
634                 return ETH_LINK_SPEED_2_5G;
635         case ETH_SPEED_NUM_5G:
636                 return ETH_LINK_SPEED_5G;
637         case ETH_SPEED_NUM_10G:
638                 return ETH_LINK_SPEED_10G;
639         case ETH_SPEED_NUM_20G:
640                 return ETH_LINK_SPEED_20G;
641         case ETH_SPEED_NUM_25G:
642                 return ETH_LINK_SPEED_25G;
643         case ETH_SPEED_NUM_40G:
644                 return ETH_LINK_SPEED_40G;
645         case ETH_SPEED_NUM_50G:
646                 return ETH_LINK_SPEED_50G;
647         case ETH_SPEED_NUM_56G:
648                 return ETH_LINK_SPEED_56G;
649         case ETH_SPEED_NUM_100G:
650                 return ETH_LINK_SPEED_100G;
651         default:
652                 return 0;
653         }
654 }
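/*
 * Editor's usage sketch (illustrative only): forcing a fixed 10G full-duplex
 * link through the port configuration, using the helper above. "conf" is a
 * local rte_eth_conf later passed to rte_eth_dev_configure().
 *
 *	struct rte_eth_conf conf = { 0 };
 *
 *	conf.link_speeds = ETH_LINK_SPEED_FIXED |
 *		rte_eth_speed_bitflag(ETH_SPEED_NUM_10G, ETH_LINK_FULL_DUPLEX);
 */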
655
656 /**
657  * Convert from the legacy rxmode bit-field API to the Rx offloads API.
658  */
659 static void
660 rte_eth_convert_rx_offload_bitfield(const struct rte_eth_rxmode *rxmode,
661                                     uint64_t *rx_offloads)
662 {
663         uint64_t offloads = 0;
664
665         if (rxmode->header_split == 1)
666                 offloads |= DEV_RX_OFFLOAD_HEADER_SPLIT;
667         if (rxmode->hw_ip_checksum == 1)
668                 offloads |= DEV_RX_OFFLOAD_CHECKSUM;
669         if (rxmode->hw_vlan_filter == 1)
670                 offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
671         if (rxmode->hw_vlan_strip == 1)
672                 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
673         if (rxmode->hw_vlan_extend == 1)
674                 offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
675         if (rxmode->jumbo_frame == 1)
676                 offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
677         if (rxmode->hw_strip_crc == 1)
678                 offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
679         if (rxmode->enable_scatter == 1)
680                 offloads |= DEV_RX_OFFLOAD_SCATTER;
681         if (rxmode->enable_lro == 1)
682                 offloads |= DEV_RX_OFFLOAD_TCP_LRO;
683         if (rxmode->hw_timestamp == 1)
684                 offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
685         if (rxmode->security == 1)
686                 offloads |= DEV_RX_OFFLOAD_SECURITY;
687
688         *rx_offloads = offloads;
689 }
690
691 /**
692  * Convert from the Rx offloads API to the legacy rxmode bit-field API.
693  */
694 static void
695 rte_eth_convert_rx_offloads(const uint64_t rx_offloads,
696                             struct rte_eth_rxmode *rxmode)
697 {
698
699         if (rx_offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
700                 rxmode->header_split = 1;
701         else
702                 rxmode->header_split = 0;
703         if (rx_offloads & DEV_RX_OFFLOAD_CHECKSUM)
704                 rxmode->hw_ip_checksum = 1;
705         else
706                 rxmode->hw_ip_checksum = 0;
707         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
708                 rxmode->hw_vlan_filter = 1;
709         else
710                 rxmode->hw_vlan_filter = 0;
711         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
712                 rxmode->hw_vlan_strip = 1;
713         else
714                 rxmode->hw_vlan_strip = 0;
715         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
716                 rxmode->hw_vlan_extend = 1;
717         else
718                 rxmode->hw_vlan_extend = 0;
719         if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
720                 rxmode->jumbo_frame = 1;
721         else
722                 rxmode->jumbo_frame = 0;
723         if (rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP)
724                 rxmode->hw_strip_crc = 1;
725         else
726                 rxmode->hw_strip_crc = 0;
727         if (rx_offloads & DEV_RX_OFFLOAD_SCATTER)
728                 rxmode->enable_scatter = 1;
729         else
730                 rxmode->enable_scatter = 0;
731         if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
732                 rxmode->enable_lro = 1;
733         else
734                 rxmode->enable_lro = 0;
735         if (rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
736                 rxmode->hw_timestamp = 1;
737         else
738                 rxmode->hw_timestamp = 0;
739         if (rx_offloads & DEV_RX_OFFLOAD_SECURITY)
740                 rxmode->security = 1;
741         else
742                 rxmode->security = 0;
743 }
744
745 int
746 rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
747                       const struct rte_eth_conf *dev_conf)
748 {
749         struct rte_eth_dev *dev;
750         struct rte_eth_dev_info dev_info;
751         struct rte_eth_conf local_conf = *dev_conf;
752         int diag;
753
754         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
755
756         if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
757                 RTE_PMD_DEBUG_TRACE(
758                         "Number of RX queues requested (%u) is greater than max supported (%d)\n",
759                         nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
760                 return -EINVAL;
761         }
762
763         if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
764                 RTE_PMD_DEBUG_TRACE(
765                         "Number of TX queues requested (%u) is greater than max supported (%d)\n",
766                         nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
767                 return -EINVAL;
768         }
769
770         dev = &rte_eth_devices[port_id];
771
772         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
773         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
774
775         if (dev->data->dev_started) {
776                 RTE_PMD_DEBUG_TRACE(
777                     "port %d must be stopped to allow configuration\n", port_id);
778                 return -EBUSY;
779         }
780
781         /*
782          * Convert between the two offloads APIs so that PMDs need to
783          * support only one of them.
784          */
785         if (dev_conf->rxmode.ignore_offload_bitfield == 0) {
786                 rte_eth_convert_rx_offload_bitfield(
787                                 &dev_conf->rxmode, &local_conf.rxmode.offloads);
788         } else {
789                 rte_eth_convert_rx_offloads(dev_conf->rxmode.offloads,
790                                             &local_conf.rxmode);
791         }
792
793         /* Copy the dev_conf parameter into the dev structure */
794         memcpy(&dev->data->dev_conf, &local_conf, sizeof(dev->data->dev_conf));
795
796         /*
797          * Check that the numbers of RX and TX queues are not greater
798          * than the maximum number of RX and TX queues supported by the
799          * configured device.
800          */
801         (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
802
803         if (nb_rx_q == 0 && nb_tx_q == 0) {
804                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_q and nb_tx_q cannot both be 0\n", port_id);
805                 return -EINVAL;
806         }
807
808         if (nb_rx_q > dev_info.max_rx_queues) {
809                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
810                                 port_id, nb_rx_q, dev_info.max_rx_queues);
811                 return -EINVAL;
812         }
813
814         if (nb_tx_q > dev_info.max_tx_queues) {
815                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
816                                 port_id, nb_tx_q, dev_info.max_tx_queues);
817                 return -EINVAL;
818         }
819
820         /* Check that the device supports requested interrupts */
821         if ((dev_conf->intr_conf.lsc == 1) &&
822                 (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
823                         RTE_PMD_DEBUG_TRACE("driver %s does not support lsc\n",
824                                         dev->device->driver->name);
825                         return -EINVAL;
826         }
827         if ((dev_conf->intr_conf.rmv == 1) &&
828             (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
829                 RTE_PMD_DEBUG_TRACE("driver %s does not support rmv\n",
830                                     dev->device->driver->name);
831                 return -EINVAL;
832         }
833
834         /*
835          * If jumbo frames are enabled, check that the maximum RX packet
836          * length is supported by the configured device.
837          */
838         if (local_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
839                 if (dev_conf->rxmode.max_rx_pkt_len >
840                     dev_info.max_rx_pktlen) {
841                         RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
842                                 " > max valid value %u\n",
843                                 port_id,
844                                 (unsigned)dev_conf->rxmode.max_rx_pkt_len,
845                                 (unsigned)dev_info.max_rx_pktlen);
846                         return -EINVAL;
847                 } else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
848                         RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
849                                 " < min valid value %u\n",
850                                 port_id,
851                                 (unsigned)dev_conf->rxmode.max_rx_pkt_len,
852                                 (unsigned)ETHER_MIN_LEN);
853                         return -EINVAL;
854                 }
855         } else {
856                 if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
857                         dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
858                         /* Use default value */
859                         dev->data->dev_conf.rxmode.max_rx_pkt_len =
860                                                         ETHER_MAX_LEN;
861         }
862
863         /*
864          * Setup new number of RX/TX queues and reconfigure device.
865          */
866         diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
867         if (diag != 0) {
868                 RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
869                                 port_id, diag);
870                 return diag;
871         }
872
873         diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
874         if (diag != 0) {
875                 RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
876                                 port_id, diag);
877                 rte_eth_dev_rx_queue_config(dev, 0);
878                 return diag;
879         }
880
881         diag = (*dev->dev_ops->dev_configure)(dev);
882         if (diag != 0) {
883                 RTE_PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
884                                 port_id, diag);
885                 rte_eth_dev_rx_queue_config(dev, 0);
886                 rte_eth_dev_tx_queue_config(dev, 0);
887                 return diag;
888         }
889
890         /* Initialize Rx profiling if enabled at compilation time. */
891         diag = __rte_eth_profile_rx_init(port_id, dev);
892         if (diag != 0) {
893                 RTE_PMD_DEBUG_TRACE("port%d __rte_eth_profile_rx_init = %d\n",
894                                 port_id, diag);
895                 rte_eth_dev_rx_queue_config(dev, 0);
896                 rte_eth_dev_tx_queue_config(dev, 0);
897                 return diag;
898         }
899
900         return 0;
901 }
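/*
 * Editor's usage sketch (illustrative only): a minimal single-queue port
 * configuration as an application would issue it. "port_id" is assumed to be
 * a valid, stopped port; queue setup and rte_eth_dev_start() still follow.
 *
 *	struct rte_eth_conf conf = { 0 };
 *
 *	conf.rxmode.ignore_offload_bitfield = 1;
 *	if (rte_eth_dev_configure(port_id, 1, 1, &conf) < 0)
 *		rte_exit(EXIT_FAILURE, "cannot configure port\n");
 */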
902
903 void
904 _rte_eth_dev_reset(struct rte_eth_dev *dev)
905 {
906         if (dev->data->dev_started) {
907                 RTE_PMD_DEBUG_TRACE(
908                         "port %d must be stopped to allow reset\n",
909                         dev->data->port_id);
910                 return;
911         }
912
913         rte_eth_dev_rx_queue_config(dev, 0);
914         rte_eth_dev_tx_queue_config(dev, 0);
915
916         memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
917 }
918
919 static void
920 rte_eth_dev_config_restore(uint16_t port_id)
921 {
922         struct rte_eth_dev *dev;
923         struct rte_eth_dev_info dev_info;
924         struct ether_addr *addr;
925         uint16_t i;
926         uint32_t pool = 0;
927         uint64_t pool_mask;
928
929         dev = &rte_eth_devices[port_id];
930
931         rte_eth_dev_info_get(port_id, &dev_info);
932
933         /* replay MAC address configuration including default MAC */
934         addr = &dev->data->mac_addrs[0];
935         if (*dev->dev_ops->mac_addr_set != NULL)
936                 (*dev->dev_ops->mac_addr_set)(dev, addr);
937         else if (*dev->dev_ops->mac_addr_add != NULL)
938                 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);
939
940         if (*dev->dev_ops->mac_addr_add != NULL) {
941                 for (i = 1; i < dev_info.max_mac_addrs; i++) {
942                         addr = &dev->data->mac_addrs[i];
943
944                         /* skip zero address */
945                         if (is_zero_ether_addr(addr))
946                                 continue;
947
948                         pool = 0;
949                         pool_mask = dev->data->mac_pool_sel[i];
950
951                         do {
952                                 if (pool_mask & 1ULL)
953                                         (*dev->dev_ops->mac_addr_add)(dev,
954                                                 addr, i, pool);
955                                 pool_mask >>= 1;
956                                 pool++;
957                         } while (pool_mask);
958                 }
959         }
960
961         /* replay promiscuous configuration */
962         if (rte_eth_promiscuous_get(port_id) == 1)
963                 rte_eth_promiscuous_enable(port_id);
964         else if (rte_eth_promiscuous_get(port_id) == 0)
965                 rte_eth_promiscuous_disable(port_id);
966
967         /* replay all multicast configuration */
968         if (rte_eth_allmulticast_get(port_id) == 1)
969                 rte_eth_allmulticast_enable(port_id);
970         else if (rte_eth_allmulticast_get(port_id) == 0)
971                 rte_eth_allmulticast_disable(port_id);
972 }
973
974 int
975 rte_eth_dev_start(uint16_t port_id)
976 {
977         struct rte_eth_dev *dev;
978         int diag;
979
980         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
981
982         dev = &rte_eth_devices[port_id];
983
984         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
985
986         if (dev->data->dev_started != 0) {
987                 RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16
988                         " already started\n",
989                         port_id);
990                 return 0;
991         }
992
993         diag = (*dev->dev_ops->dev_start)(dev);
994         if (diag == 0)
995                 dev->data->dev_started = 1;
996         else
997                 return diag;
998
999         rte_eth_dev_config_restore(port_id);
1000
1001         if (dev->data->dev_conf.intr_conf.lsc == 0) {
1002                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
1003                 (*dev->dev_ops->link_update)(dev, 0);
1004         }
1005         return 0;
1006 }
1007
1008 void
1009 rte_eth_dev_stop(uint16_t port_id)
1010 {
1011         struct rte_eth_dev *dev;
1012
1013         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1014         dev = &rte_eth_devices[port_id];
1015
1016         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1017
1018         if (dev->data->dev_started == 0) {
1019                 RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16
1020                         " already stopped\n",
1021                         port_id);
1022                 return;
1023         }
1024
1025         dev->data->dev_started = 0;
1026         (*dev->dev_ops->dev_stop)(dev);
1027 }
1028
1029 int
1030 rte_eth_dev_set_link_up(uint16_t port_id)
1031 {
1032         struct rte_eth_dev *dev;
1033
1034         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1035
1036         dev = &rte_eth_devices[port_id];
1037
1038         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1039         return (*dev->dev_ops->dev_set_link_up)(dev);
1040 }
1041
1042 int
1043 rte_eth_dev_set_link_down(uint16_t port_id)
1044 {
1045         struct rte_eth_dev *dev;
1046
1047         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1048
1049         dev = &rte_eth_devices[port_id];
1050
1051         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1052         return (*dev->dev_ops->dev_set_link_down)(dev);
1053 }
1054
1055 void
1056 rte_eth_dev_close(uint16_t port_id)
1057 {
1058         struct rte_eth_dev *dev;
1059
1060         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1061         dev = &rte_eth_devices[port_id];
1062
1063         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
1064         dev->data->dev_started = 0;
1065         (*dev->dev_ops->dev_close)(dev);
1066
1067         dev->data->nb_rx_queues = 0;
1068         rte_free(dev->data->rx_queues);
1069         dev->data->rx_queues = NULL;
1070         dev->data->nb_tx_queues = 0;
1071         rte_free(dev->data->tx_queues);
1072         dev->data->tx_queues = NULL;
1073 }
1074
1075 int
1076 rte_eth_dev_reset(uint16_t port_id)
1077 {
1078         struct rte_eth_dev *dev;
1079         int ret;
1080
1081         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1082         dev = &rte_eth_devices[port_id];
1083
1084         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
1085
1086         rte_eth_dev_stop(port_id);
1087         ret = dev->dev_ops->dev_reset(dev);
1088
1089         return ret;
1090 }
1091
1092 int
1093 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1094                        uint16_t nb_rx_desc, unsigned int socket_id,
1095                        const struct rte_eth_rxconf *rx_conf,
1096                        struct rte_mempool *mp)
1097 {
1098         int ret;
1099         uint32_t mbp_buf_size;
1100         struct rte_eth_dev *dev;
1101         struct rte_eth_dev_info dev_info;
1102         struct rte_eth_rxconf local_conf;
1103         void **rxq;
1104
1105         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1106
1107         dev = &rte_eth_devices[port_id];
1108         if (rx_queue_id >= dev->data->nb_rx_queues) {
1109                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
1110                 return -EINVAL;
1111         }
1112
1113         if (dev->data->dev_started) {
1114                 RTE_PMD_DEBUG_TRACE(
1115                     "port %d must be stopped to allow configuration\n", port_id);
1116                 return -EBUSY;
1117         }
1118
1119         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1120         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
1121
1122         /*
1123          * Check the size of the mbuf data buffer.
1124          * This value must be provided in the private data of the memory pool.
1125          * First check that the memory pool has valid private data.
1126          */
1127         rte_eth_dev_info_get(port_id, &dev_info);
1128         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
1129                 RTE_PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
1130                                 mp->name, (int) mp->private_data_size,
1131                                 (int) sizeof(struct rte_pktmbuf_pool_private));
1132                 return -ENOSPC;
1133         }
1134         mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1135
1136         if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
1137                 RTE_PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
1138                                 "(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
1139                                 "=%d)\n",
1140                                 mp->name,
1141                                 (int)mbp_buf_size,
1142                                 (int)(RTE_PKTMBUF_HEADROOM +
1143                                       dev_info.min_rx_bufsize),
1144                                 (int)RTE_PKTMBUF_HEADROOM,
1145                                 (int)dev_info.min_rx_bufsize);
1146                 return -EINVAL;
1147         }
1148
1149         if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
1150                         nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
1151                         nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
1152
1153                 RTE_PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
1154                         "should be: <= %hu, >= %hu, and a multiple of %hu\n",
1155                         nb_rx_desc,
1156                         dev_info.rx_desc_lim.nb_max,
1157                         dev_info.rx_desc_lim.nb_min,
1158                         dev_info.rx_desc_lim.nb_align);
1159                 return -EINVAL;
1160         }
1161
1162         rxq = dev->data->rx_queues;
1163         if (rxq[rx_queue_id]) {
1164                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
1165                                         -ENOTSUP);
1166                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
1167                 rxq[rx_queue_id] = NULL;
1168         }
1169
1170         if (rx_conf == NULL)
1171                 rx_conf = &dev_info.default_rxconf;
1172
1173         local_conf = *rx_conf;
1174         if (dev->data->dev_conf.rxmode.ignore_offload_bitfield == 0) {
1175                 /*
1176                  * Reflect port offloads to queue offloads so that the
1177                  * port-level offloads are not discarded.
1178                  */
1179                 rte_eth_convert_rx_offload_bitfield(&dev->data->dev_conf.rxmode,
1180                                                     &local_conf.offloads);
1181         }
1182
1183         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
1184                                               socket_id, &local_conf, mp);
1185         if (!ret) {
1186                 if (!dev->data->min_rx_buf_size ||
1187                     dev->data->min_rx_buf_size > mbp_buf_size)
1188                         dev->data->min_rx_buf_size = mbp_buf_size;
1189         }
1190
1191         return ret;
1192 }
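/*
 * Editor's usage sketch (illustrative only): setting up RX queue 0 with a
 * freshly created mbuf pool; the descriptor count and pool sizing are
 * arbitrary example values, and "port_id" is assumed valid and stopped.
 *
 *	struct rte_mempool *mp;
 *
 *	mp = rte_pktmbuf_pool_create("rx_pool", 8192, 256, 0,
 *			RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *	if (mp == NULL ||
 *	    rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL, mp) < 0)
 *		rte_exit(EXIT_FAILURE, "RX queue setup failed\n");
 */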
1193
1194 /**
1195  * Convert from the legacy txq_flags API to the Tx offloads API.
1196  */
1197 static void
1198 rte_eth_convert_txq_flags(const uint32_t txq_flags, uint64_t *tx_offloads)
1199 {
1200         uint64_t offloads = 0;
1201
1202         if (!(txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS))
1203                 offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
1204         if (!(txq_flags & ETH_TXQ_FLAGS_NOVLANOFFL))
1205                 offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
1206         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP))
1207                 offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM;
1208         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMUDP))
1209                 offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
1210         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMTCP))
1211                 offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
1212         if ((txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT) &&
1213             (txq_flags & ETH_TXQ_FLAGS_NOMULTMEMP))
1214                 offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1215
1216         *tx_offloads = offloads;
1217 }
1218
1219 /**
1220  * Convert from the Tx offloads API to the legacy txq_flags API.
1221  */
1222 static void
1223 rte_eth_convert_txq_offloads(const uint64_t tx_offloads, uint32_t *txq_flags)
1224 {
1225         uint32_t flags = 0;
1226
1227         if (!(tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS))
1228                 flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
1229         if (!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT))
1230                 flags |= ETH_TXQ_FLAGS_NOVLANOFFL;
1231         if (!(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
1232                 flags |= ETH_TXQ_FLAGS_NOXSUMSCTP;
1233         if (!(tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM))
1234                 flags |= ETH_TXQ_FLAGS_NOXSUMUDP;
1235         if (!(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM))
1236                 flags |= ETH_TXQ_FLAGS_NOXSUMTCP;
1237         if (tx_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
1238                 flags |= (ETH_TXQ_FLAGS_NOREFCOUNT | ETH_TXQ_FLAGS_NOMULTMEMP);
1239
1240         *txq_flags = flags;
1241 }
1242
1243 int
1244 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
1245                        uint16_t nb_tx_desc, unsigned int socket_id,
1246                        const struct rte_eth_txconf *tx_conf)
1247 {
1248         struct rte_eth_dev *dev;
1249         struct rte_eth_dev_info dev_info;
1250         struct rte_eth_txconf local_conf;
1251         void **txq;
1252
1253         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1254
1255         dev = &rte_eth_devices[port_id];
1256         if (tx_queue_id >= dev->data->nb_tx_queues) {
1257                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
1258                 return -EINVAL;
1259         }
1260
1261         if (dev->data->dev_started) {
1262                 RTE_PMD_DEBUG_TRACE(
1263                     "port %d must be stopped to allow configuration\n", port_id);
1264                 return -EBUSY;
1265         }
1266
1267         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1268         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
1269
1270         rte_eth_dev_info_get(port_id, &dev_info);
1271
1272         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
1273             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
1274             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
1275                 RTE_PMD_DEBUG_TRACE("Invalid value for nb_tx_desc(=%hu), "
1276                                 "should be: <= %hu, >= %hu, and a multiple of %hu\n",
1277                                 nb_tx_desc,
1278                                 dev_info.tx_desc_lim.nb_max,
1279                                 dev_info.tx_desc_lim.nb_min,
1280                                 dev_info.tx_desc_lim.nb_align);
1281                 return -EINVAL;
1282         }
1283
1284         txq = dev->data->tx_queues;
1285         if (txq[tx_queue_id]) {
1286                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
1287                                         -ENOTSUP);
1288                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
1289                 txq[tx_queue_id] = NULL;
1290         }
1291
1292         if (tx_conf == NULL)
1293                 tx_conf = &dev_info.default_txconf;
1294
1295         /*
1296          * Convert between the two offloads APIs so that PMDs need to
1297          * support only one of them.
1298          */
1299         local_conf = *tx_conf;
1300         if (tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) {
1301                 rte_eth_convert_txq_offloads(tx_conf->offloads,
1302                                              &local_conf.txq_flags);
1303                 /* Keep the ignore flag. */
1304                 local_conf.txq_flags |= ETH_TXQ_FLAGS_IGNORE;
1305         } else {
1306                 rte_eth_convert_txq_flags(tx_conf->txq_flags,
1307                                           &local_conf.offloads);
1308         }
1309
1310         return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
1311                                                socket_id, &local_conf);
1312 }
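/*
 * Editor's usage sketch (illustrative only): TX queue 0 with the driver's
 * defaults (passing tx_conf == NULL selects dev_info.default_txconf).
 *
 *	if (rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL) < 0)
 *		rte_exit(EXIT_FAILURE, "TX queue setup failed\n");
 */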
1313
1314 void
1315 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
1316                 void *userdata __rte_unused)
1317 {
1318         unsigned i;
1319
1320         for (i = 0; i < unsent; i++)
1321                 rte_pktmbuf_free(pkts[i]);
1322 }
1323
1324 void
1325 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
1326                 void *userdata)
1327 {
1328         uint64_t *count = userdata;
1329         unsigned i;
1330
1331         for (i = 0; i < unsent; i++)
1332                 rte_pktmbuf_free(pkts[i]);
1333
1334         *count += unsent;
1335 }
1336
1337 int
1338 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
1339                 buffer_tx_error_fn cbfn, void *userdata)
1340 {
1341         buffer->error_callback = cbfn;
1342         buffer->error_userdata = userdata;
1343         return 0;
1344 }
1345
1346 int
1347 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
1348 {
1349         int ret = 0;
1350
1351         if (buffer == NULL)
1352                 return -EINVAL;
1353
1354         buffer->size = size;
1355         if (buffer->error_callback == NULL) {
1356                 ret = rte_eth_tx_buffer_set_err_callback(
1357                         buffer, rte_eth_tx_buffer_drop_callback, NULL);
1358         }
1359
1360         return ret;
1361 }
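/*
 * Editor's usage sketch (illustrative only): a TX buffer that counts dropped
 * packets, built from the helpers above. "port_id" and the mbuf "pkt" are
 * assumed to exist in the caller; RTE_ETH_TX_BUFFER_SIZE() gives the
 * allocation size for the requested number of packets.
 *
 *	static uint64_t drops;
 *	struct rte_eth_dev_tx_buffer *buf;
 *
 *	buf = rte_zmalloc("tx_buf", RTE_ETH_TX_BUFFER_SIZE(32), 0);
 *	rte_eth_tx_buffer_init(buf, 32);
 *	rte_eth_tx_buffer_set_err_callback(buf,
 *			rte_eth_tx_buffer_count_callback, &drops);
 *
 *	rte_eth_tx_buffer(port_id, 0, buf, pkt);
 *	rte_eth_tx_buffer_flush(port_id, 0, buf);
 */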
1362
1363 int
1364 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
1365 {
1366         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1367
1368         /* Validate Input Data. Bail if not valid or not supported. */
1369         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1370         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
1371
1372         /* Call driver to free pending mbufs. */
1373         return (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
1374                         free_cnt);
1375 }
1376
1377 void
1378 rte_eth_promiscuous_enable(uint16_t port_id)
1379 {
1380         struct rte_eth_dev *dev;
1381
1382         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1383         dev = &rte_eth_devices[port_id];
1384
1385         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
1386         (*dev->dev_ops->promiscuous_enable)(dev);
1387         dev->data->promiscuous = 1;
1388 }
1389
1390 void
1391 rte_eth_promiscuous_disable(uint16_t port_id)
1392 {
1393         struct rte_eth_dev *dev;
1394
1395         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1396         dev = &rte_eth_devices[port_id];
1397
1398         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
1399         dev->data->promiscuous = 0;
1400         (*dev->dev_ops->promiscuous_disable)(dev);
1401 }
1402
1403 int
1404 rte_eth_promiscuous_get(uint16_t port_id)
1405 {
1406         struct rte_eth_dev *dev;
1407
1408         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1409
1410         dev = &rte_eth_devices[port_id];
1411         return dev->data->promiscuous;
1412 }
1413
1414 void
1415 rte_eth_allmulticast_enable(uint16_t port_id)
1416 {
1417         struct rte_eth_dev *dev;
1418
1419         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1420         dev = &rte_eth_devices[port_id];
1421
1422         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
1423         (*dev->dev_ops->allmulticast_enable)(dev);
1424         dev->data->all_multicast = 1;
1425 }
1426
1427 void
1428 rte_eth_allmulticast_disable(uint16_t port_id)
1429 {
1430         struct rte_eth_dev *dev;
1431
1432         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1433         dev = &rte_eth_devices[port_id];
1434
1435         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
1436         dev->data->all_multicast = 0;
1437         (*dev->dev_ops->allmulticast_disable)(dev);
1438 }
1439
1440 int
1441 rte_eth_allmulticast_get(uint16_t port_id)
1442 {
1443         struct rte_eth_dev *dev;
1444
1445         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1446
1447         dev = &rte_eth_devices[port_id];
1448         return dev->data->all_multicast;
1449 }
1450
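/*
 * Atomically read the link status information (packed into 64 bits) from
 * the shared device data into the caller-provided buffer.
 */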
1451 static inline int
1452 rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
1453                                 struct rte_eth_link *link)
1454 {
1455         struct rte_eth_link *dst = link;
1456         struct rte_eth_link *src = &(dev->data->dev_link);
1457
1458         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
1459                                         *(uint64_t *)src) == 0)
1460                 return -1;
1461
1462         return 0;
1463 }
1464
1465 void
1466 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
1467 {
1468         struct rte_eth_dev *dev;
1469
1470         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1471         dev = &rte_eth_devices[port_id];
1472
1473         if (dev->data->dev_conf.intr_conf.lsc != 0)
1474                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1475         else {
1476                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1477                 (*dev->dev_ops->link_update)(dev, 1);
1478                 *eth_link = dev->data->dev_link;
1479         }
1480 }
1481
1482 void
1483 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
1484 {
1485         struct rte_eth_dev *dev;
1486
1487         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1488         dev = &rte_eth_devices[port_id];
1489
1490         if (dev->data->dev_conf.intr_conf.lsc != 0)
1491                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1492         else {
1493                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1494                 (*dev->dev_ops->link_update)(dev, 0);
1495                 *eth_link = dev->data->dev_link;
1496         }
1497 }
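/*
 * Editor's usage sketch (illustrative only): non-blocking link status poll
 * on an assumed valid "port_id".
 *
 *	struct rte_eth_link link;
 *
 *	rte_eth_link_get_nowait(port_id, &link);
 *	if (link.link_status == ETH_LINK_UP)
 *		printf("port %u up at %u Mbps\n", port_id, link.link_speed);
 */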
1498
1499 int
1500 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
1501 {
1502         struct rte_eth_dev *dev;
1503
1504         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1505
1506         dev = &rte_eth_devices[port_id];
1507         memset(stats, 0, sizeof(*stats));
1508
1509         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1510         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1511         return (*dev->dev_ops->stats_get)(dev, stats);
1512 }
1513
1514 int
1515 rte_eth_stats_reset(uint16_t port_id)
1516 {
1517         struct rte_eth_dev *dev;
1518
1519         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1520         dev = &rte_eth_devices[port_id];
1521
1522         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
1523         (*dev->dev_ops->stats_reset)(dev);
1524         dev->data->rx_mbuf_alloc_failed = 0;
1525
1526         return 0;
1527 }
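/*
 * Editor's usage sketch (illustrative only): reading and then clearing the
 * basic counters exposed by rte_eth_stats_get()/rte_eth_stats_reset().
 *
 *	struct rte_eth_stats st;
 *
 *	if (rte_eth_stats_get(port_id, &st) == 0)
 *		printf("rx=%" PRIu64 " tx=%" PRIu64 " missed=%" PRIu64 "\n",
 *		       st.ipackets, st.opackets, st.imissed);
 *	rte_eth_stats_reset(port_id);
 */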
1528
1529 static inline int
1530 get_xstats_basic_count(struct rte_eth_dev *dev)
1531 {
1532         uint16_t nb_rxqs, nb_txqs;
1533         int count;
1534
1535         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1536         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1537
1538         count = RTE_NB_STATS;
1539         count += nb_rxqs * RTE_NB_RXQ_STATS;
1540         count += nb_txqs * RTE_NB_TXQ_STATS;
1541
1542         return count;
1543 }
1544
1545 static int
1546 get_xstats_count(uint16_t port_id)
1547 {
1548         struct rte_eth_dev *dev;
1549         int count;
1550
1551         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1552         dev = &rte_eth_devices[port_id];
1553         if (dev->dev_ops->xstats_get_names_by_id != NULL) {
1554                 count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
1555                                 NULL, 0);
1556                 if (count < 0)
1557                         return count;
1558         }
1559         if (dev->dev_ops->xstats_get_names != NULL) {
1560                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
1561                 if (count < 0)
1562                         return count;
1563         } else
1564                 count = 0;
1565
1566
1567         count += get_xstats_basic_count(dev);
1568
1569         return count;
1570 }
1571
1572 int
1573 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
1574                 uint64_t *id)
1575 {
1576         int cnt_xstats, idx_xstat;
1577
1578         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1579
1580         if (!id) {
1581                 RTE_PMD_DEBUG_TRACE("Error: id pointer is NULL\n");
1582                 return -ENOMEM;
1583         }
1584
1585         if (!xstat_name) {
1586                 RTE_PMD_DEBUG_TRACE("Error: xstat_name pointer is NULL\n");
1587                 return -ENOMEM;
1588         }
1589
1590         /* Get count */
1591         cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
1592         if (cnt_xstats < 0) {
1593                 RTE_PMD_DEBUG_TRACE("Error: Cannot get count of xstats\n");
1594                 return -ENODEV;
1595         }
1596
1597         /* Get id-name lookup table */
1598         struct rte_eth_xstat_name xstats_names[cnt_xstats];
1599
1600         if (cnt_xstats != rte_eth_xstats_get_names_by_id(
1601                         port_id, xstats_names, cnt_xstats, NULL)) {
1602                 RTE_PMD_DEBUG_TRACE("Error: Cannot get xstats lookup\n");
1603                 return -1;
1604         }
1605
1606         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
1607                 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
1608                         *id = idx_xstat;
1609                         return 0;
1610                 }
1611         }
1612
1613         return -EINVAL;
1614 }
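
/*
 * Usage sketch (editorial illustration): the typical "by id" flow resolves a
 * statistic name once, then polls only that counter on the fast path. The
 * helper name and the "rx_good_packets" statistic are assumptions for the
 * example.
 *
 *	static uint64_t
 *	example_poll_one_xstat(uint16_t port_id)
 *	{
 *		uint64_t id, value = 0;
 *
 *		if (rte_eth_xstats_get_id_by_name(port_id,
 *				"rx_good_packets", &id) == 0)
 *			rte_eth_xstats_get_by_id(port_id, &id, &value, 1);
 *		return value;
 *	}
 */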
1615
1616 /* retrieve basic stats names */
1617 static int
1618 rte_eth_basic_stats_get_names(struct rte_eth_dev *dev,
1619         struct rte_eth_xstat_name *xstats_names)
1620 {
1621         int cnt_used_entries = 0;
1622         uint32_t idx, id_queue;
1623         uint16_t num_q;
1624
1625         for (idx = 0; idx < RTE_NB_STATS; idx++) {
1626                 snprintf(xstats_names[cnt_used_entries].name,
1627                         sizeof(xstats_names[0].name),
1628                         "%s", rte_stats_strings[idx].name);
1629                 cnt_used_entries++;
1630         }
1631         num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1632         for (id_queue = 0; id_queue < num_q; id_queue++) {
1633                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
1634                         snprintf(xstats_names[cnt_used_entries].name,
1635                                 sizeof(xstats_names[0].name),
1636                                 "rx_q%u%s",
1637                                 id_queue, rte_rxq_stats_strings[idx].name);
1638                         cnt_used_entries++;
1639                 }
1640
1641         }
1642         num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1643         for (id_queue = 0; id_queue < num_q; id_queue++) {
1644                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
1645                         snprintf(xstats_names[cnt_used_entries].name,
1646                                 sizeof(xstats_names[0].name),
1647                                 "tx_q%u%s",
1648                                 id_queue, rte_txq_stats_strings[idx].name);
1649                         cnt_used_entries++;
1650                 }
1651         }
1652         return cnt_used_entries;
1653 }
1654
1655 /* retrieve ethdev extended statistics names */
1656 int
1657 rte_eth_xstats_get_names_by_id(uint16_t port_id,
1658         struct rte_eth_xstat_name *xstats_names, unsigned int size,
1659         uint64_t *ids)
1660 {
1661         struct rte_eth_xstat_name *xstats_names_copy;
1662         unsigned int no_basic_stat_requested = 1;
1663         unsigned int no_ext_stat_requested = 1;
1664         unsigned int expected_entries;
1665         unsigned int basic_count;
1666         struct rte_eth_dev *dev;
1667         unsigned int i;
1668         int ret;
1669
1670         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1671         dev = &rte_eth_devices[port_id];
1672
1673         basic_count = get_xstats_basic_count(dev);
1674         ret = get_xstats_count(port_id);
1675         if (ret < 0)
1676                 return ret;
1677         expected_entries = (unsigned int)ret;
1678
1679         /* Return max number of stats if no ids given */
1680         if (!ids) {
1681                 if (!xstats_names)
1682                         return expected_entries;
1683                 else if (size < expected_entries)
1684                         return expected_entries;
1685         }
1686
1687         if (ids && !xstats_names)
1688                 return -EINVAL;
1689
1690         if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
1691                 uint64_t ids_copy[size];
1692
1693                 for (i = 0; i < size; i++) {
1694                         if (ids[i] < basic_count) {
1695                                 no_basic_stat_requested = 0;
1696                                 break;
1697                         }
1698
1699                         /*
1700                          * Convert ids to the xstats ids known to the PMD;
1701                          * the ids seen by the user cover basic + extended stats.
1702                          */
1703                         ids_copy[i] = ids[i] - basic_count;
1704                 }
1705
1706                 if (no_basic_stat_requested)
1707                         return (*dev->dev_ops->xstats_get_names_by_id)(dev,
1708                                         xstats_names, ids_copy, size);
1709         }
1710
1711         /* Retrieve all stats */
1712         if (!ids) {
1713                 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
1714                                 expected_entries);
1715                 if (num_stats < 0 || num_stats > (int)expected_entries)
1716                         return num_stats;
1717                 else
1718                         return expected_entries;
1719         }
1720
1721         xstats_names_copy = calloc(expected_entries,
1722                 sizeof(struct rte_eth_xstat_name));
1723
1724         if (!xstats_names_copy) {
1725                 RTE_PMD_DEBUG_TRACE("ERROR: can't allocate memory\n");
1726                 return -ENOMEM;
1727         }
1728
1729         if (ids) {
1730                 for (i = 0; i < size; i++) {
1731                         if (ids[i] >= basic_count) {
1732                                 no_ext_stat_requested = 0;
1733                                 break;
1734                         }
1735                 }
1736         }
1737
1738         /* Fill xstats_names_copy structure */
1739         if (ids && no_ext_stat_requested) {
1740                 rte_eth_basic_stats_get_names(dev, xstats_names_copy);
1741         } else {
1742                 rte_eth_xstats_get_names(port_id, xstats_names_copy,
1743                         expected_entries);
1744         }
1745
1746         /* Filter stats */
1747         for (i = 0; i < size; i++) {
1748                 if (ids[i] >= expected_entries) {
1749                         RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n");
1750                         free(xstats_names_copy);
1751                         return -1;
1752                 }
1753                 xstats_names[i] = xstats_names_copy[ids[i]];
1754         }
1755
1756         free(xstats_names_copy);
1757         return size;
1758 }
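
/*
 * Usage sketch (editorial illustration): when the caller already knows which
 * ids it cares about, only those names need to be fetched. The two hard-coded
 * ids and the helper name are assumptions for the example.
 *
 *	static void
 *	example_print_two_names(uint16_t port_id)
 *	{
 *		uint64_t ids[2] = {0, 1};
 *		struct rte_eth_xstat_name names[2];
 *
 *		if (rte_eth_xstats_get_names_by_id(port_id, names, 2, ids) == 2)
 *			printf("%s, %s\n", names[0].name, names[1].name);
 *	}
 */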
1759
1760 int
1761 rte_eth_xstats_get_names(uint16_t port_id,
1762         struct rte_eth_xstat_name *xstats_names,
1763         unsigned int size)
1764 {
1765         struct rte_eth_dev *dev;
1766         int cnt_used_entries;
1767         int cnt_expected_entries;
1768         int cnt_driver_entries;
1769
1770         cnt_expected_entries = get_xstats_count(port_id);
1771         if (xstats_names == NULL || cnt_expected_entries < 0 ||
1772                         (int)size < cnt_expected_entries)
1773                 return cnt_expected_entries;
1774
1775         /* port_id checked in get_xstats_count() */
1776         dev = &rte_eth_devices[port_id];
1777
1778         cnt_used_entries = rte_eth_basic_stats_get_names(
1779                 dev, xstats_names);
1780
1781         if (dev->dev_ops->xstats_get_names != NULL) {
1782                 /* If there are any driver-specific xstats, append them
1783                  * to end of list.
1784                  */
1785                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
1786                         dev,
1787                         xstats_names + cnt_used_entries,
1788                         size - cnt_used_entries);
1789                 if (cnt_driver_entries < 0)
1790                         return cnt_driver_entries;
1791                 cnt_used_entries += cnt_driver_entries;
1792         }
1793
1794         return cnt_used_entries;
1795 }
1796
1797
1798 static int
1799 rte_eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
1800 {
1801         struct rte_eth_dev *dev;
1802         struct rte_eth_stats eth_stats;
1803         unsigned int count = 0, i, q;
1804         uint64_t val, *stats_ptr;
1805         uint16_t nb_rxqs, nb_txqs;
1806
1807         rte_eth_stats_get(port_id, &eth_stats);
1808         dev = &rte_eth_devices[port_id];
1809
1810         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1811         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1812
1813         /* global stats */
1814         for (i = 0; i < RTE_NB_STATS; i++) {
1815                 stats_ptr = RTE_PTR_ADD(&eth_stats,
1816                                         rte_stats_strings[i].offset);
1817                 val = *stats_ptr;
1818                 xstats[count++].value = val;
1819         }
1820
1821         /* per-rxq stats */
1822         for (q = 0; q < nb_rxqs; q++) {
1823                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
1824                         stats_ptr = RTE_PTR_ADD(&eth_stats,
1825                                         rte_rxq_stats_strings[i].offset +
1826                                         q * sizeof(uint64_t));
1827                         val = *stats_ptr;
1828                         xstats[count++].value = val;
1829                 }
1830         }
1831
1832         /* per-txq stats */
1833         for (q = 0; q < nb_txqs; q++) {
1834                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
1835                         stats_ptr = RTE_PTR_ADD(&eth_stats,
1836                                         rte_txq_stats_strings[i].offset +
1837                                         q * sizeof(uint64_t));
1838                         val = *stats_ptr;
1839                         xstats[count++].value = val;
1840                 }
1841         }
1842         return count;
1843 }
1844
1845 /* retrieve ethdev extended statistics */
1846 int
1847 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
1848                          uint64_t *values, unsigned int size)
1849 {
1850         unsigned int no_basic_stat_requested = 1;
1851         unsigned int no_ext_stat_requested = 1;
1852         unsigned int num_xstats_filled;
1853         unsigned int basic_count;
1854         uint16_t expected_entries;
1855         struct rte_eth_dev *dev;
1856         unsigned int i;
1857         int ret;
1858
1859         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1860         expected_entries = get_xstats_count(port_id);
1861         struct rte_eth_xstat xstats[expected_entries];
1862         dev = &rte_eth_devices[port_id];
1863         basic_count = get_xstats_basic_count(dev);
1864
1865         /* Return max number of stats if no ids given */
1866         if (!ids) {
1867                 if (!values)
1868                         return expected_entries;
1869                 else if (size < expected_entries)
1870                         return expected_entries;
1871         }
1872
1873         if (ids && !values)
1874                 return -EINVAL;
1875
1876         if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
1878                 uint64_t ids_copy[size];
1879
1880                 for (i = 0; i < size; i++) {
1881                         if (ids[i] < basic_count) {
1882                                 no_basic_stat_requested = 0;
1883                                 break;
1884                         }
1885
1886                         /*
1887                          * Convert ids to the xstats ids known to the PMD;
1888                          * the ids seen by the user cover basic + extended stats.
1889                          */
1890                         ids_copy[i] = ids[i] - basic_count;
1891                 }
1892
1893                 if (no_basic_stat_requested)
1894                         return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
1895                                         values, size);
1896         }
1897
1898         if (ids) {
1899                 for (i = 0; i < size; i++) {
1900                         if (ids[i] >= basic_count) {
1901                                 no_ext_stat_requested = 0;
1902                                 break;
1903                         }
1904                 }
1905         }
1906
1907         /* Fill the xstats structure */
1908         if (ids && no_ext_stat_requested)
1909                 ret = rte_eth_basic_stats_get(port_id, xstats);
1910         else
1911                 ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
1912
1913         if (ret < 0)
1914                 return ret;
1915         num_xstats_filled = (unsigned int)ret;
1916
1917         /* Return all stats */
1918         if (!ids) {
1919                 for (i = 0; i < num_xstats_filled; i++)
1920                         values[i] = xstats[i].value;
1921                 return expected_entries;
1922         }
1923
1924         /* Filter stats */
1925         for (i = 0; i < size; i++) {
1926                 if (ids[i] >= expected_entries) {
1927                         RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n");
1928                         return -1;
1929                 }
1930                 values[i] = xstats[ids[i]].value;
1931         }
1932         return size;
1933 }
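
/*
 * Usage sketch (editorial illustration): passing NULL ids first returns the
 * number of available statistics, which then sizes the values array for a
 * full retrieval. The helper name is an assumption for the example.
 *
 *	static void
 *	example_fetch_all_values(uint16_t port_id)
 *	{
 *		int nb = rte_eth_xstats_get_by_id(port_id, NULL, NULL, 0);
 *
 *		if (nb <= 0)
 *			return;
 *		uint64_t values[nb];
 *		if (rte_eth_xstats_get_by_id(port_id, NULL, values, nb) == nb)
 *			printf("first counter: %" PRIu64 "\n", values[0]);
 *	}
 */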
1934
1935 int
1936 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
1937         unsigned int n)
1938 {
1939         struct rte_eth_dev *dev;
1940         unsigned int count = 0, i;
1941         signed int xcount = 0;
1942         uint16_t nb_rxqs, nb_txqs;
1943
1944         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1945
1946         dev = &rte_eth_devices[port_id];
1947
1948         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1949         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1950
1951         /* Return generic statistics */
1952         count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
1953                 (nb_txqs * RTE_NB_TXQ_STATS);
1954
1955         /* implemented by the driver */
1956         if (dev->dev_ops->xstats_get != NULL) {
1957                 /* Retrieve the xstats from the driver at the end of the
1958                  * xstats struct.
1959                  */
1960                 xcount = (*dev->dev_ops->xstats_get)(dev,
1961                                      xstats ? xstats + count : NULL,
1962                                      (n > count) ? n - count : 0);
1963
1964                 if (xcount < 0)
1965                         return xcount;
1966         }
1967
1968         if (n < count + xcount || xstats == NULL)
1969                 return count + xcount;
1970
1971         /* now fill the xstats structure */
1972         count = rte_eth_basic_stats_get(port_id, xstats);
1973
1974         for (i = 0; i < count; i++)
1975                 xstats[i].id = i;
1976         /* add an offset to driver-specific stats */
1977         for ( ; i < count + xcount; i++)
1978                 xstats[i].id += count;
1979
1980         return count + xcount;
1981 }
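
/*
 * Usage sketch (editorial illustration): pairing rte_eth_xstats_get_names()
 * with rte_eth_xstats_get() yields a complete name/value dump; the id field
 * of each entry indexes the name table. Error handling is shortened and the
 * helper name is an assumption.
 *
 *	static void
 *	example_dump_xstats(uint16_t port_id)
 *	{
 *		int i, nb = rte_eth_xstats_get(port_id, NULL, 0);
 *
 *		if (nb <= 0)
 *			return;
 *		struct rte_eth_xstat xstats[nb];
 *		struct rte_eth_xstat_name names[nb];
 *		if (rte_eth_xstats_get(port_id, xstats, nb) != nb ||
 *		    rte_eth_xstats_get_names(port_id, names, nb) != nb)
 *			return;
 *		for (i = 0; i < nb; i++)
 *			printf("%s: %" PRIu64 "\n",
 *				names[xstats[i].id].name, xstats[i].value);
 *	}
 */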
1982
1983 /* reset ethdev extended statistics */
1984 void
1985 rte_eth_xstats_reset(uint16_t port_id)
1986 {
1987         struct rte_eth_dev *dev;
1988
1989         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1990         dev = &rte_eth_devices[port_id];
1991
1992         /* implemented by the driver */
1993         if (dev->dev_ops->xstats_reset != NULL) {
1994                 (*dev->dev_ops->xstats_reset)(dev);
1995                 return;
1996         }
1997
1998         /* fallback to default */
1999         rte_eth_stats_reset(port_id);
2000 }
2001
2002 static int
2003 set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx,
2004                 uint8_t is_rx)
2005 {
2006         struct rte_eth_dev *dev;
2007
2008         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2009
2010         dev = &rte_eth_devices[port_id];
2011
2012         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
2013         return (*dev->dev_ops->queue_stats_mapping_set)
2014                         (dev, queue_id, stat_idx, is_rx);
2015 }
2016
2017
2018 int
2019 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
2020                 uint8_t stat_idx)
2021 {
2022         return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
2023                         STAT_QMAP_TX);
2024 }
2025
2026
2027 int
2028 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
2029                 uint8_t stat_idx)
2030 {
2031         return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
2032                         STAT_QMAP_RX);
2033 }
2034
2035 int
2036 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
2037 {
2038         struct rte_eth_dev *dev;
2039
2040         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2041         dev = &rte_eth_devices[port_id];
2042
2043         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
2044         return (*dev->dev_ops->fw_version_get)(dev, fw_version, fw_size);
2045 }
2046
2047 void
2048 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
2049 {
2050         struct rte_eth_dev *dev;
2051         const struct rte_eth_desc_lim lim = {
2052                 .nb_max = UINT16_MAX,
2053                 .nb_min = 0,
2054                 .nb_align = 1,
2055         };
2056
2057         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2058         dev = &rte_eth_devices[port_id];
2059
2060         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
2061         dev_info->rx_desc_lim = lim;
2062         dev_info->tx_desc_lim = lim;
2063
2064         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
2065         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
2066         dev_info->driver_name = dev->device->driver->name;
2067         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
2068         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
2069 }
2070
2071 int
2072 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
2073                                  uint32_t *ptypes, int num)
2074 {
2075         int i, j;
2076         struct rte_eth_dev *dev;
2077         const uint32_t *all_ptypes;
2078
2079         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2080         dev = &rte_eth_devices[port_id];
2081         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
2082         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
2083
2084         if (!all_ptypes)
2085                 return 0;
2086
2087         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
2088                 if (all_ptypes[i] & ptype_mask) {
2089                         if (j < num)
2090                                 ptypes[j] = all_ptypes[i];
2091                         j++;
2092                 }
2093
2094         return j;
2095 }
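
/*
 * Usage sketch (editorial illustration): like several other ethdev queries,
 * this API can be called with a zero-sized array first to learn how many
 * entries will be returned. RTE_PTYPE_L4_MASK and the helper name are
 * assumptions for the example.
 *
 *	static void
 *	example_list_l4_ptypes(uint16_t port_id)
 *	{
 *		int i, num = rte_eth_dev_get_supported_ptypes(port_id,
 *				RTE_PTYPE_L4_MASK, NULL, 0);
 *
 *		if (num <= 0)
 *			return;
 *		uint32_t ptypes[num];
 *		num = rte_eth_dev_get_supported_ptypes(port_id,
 *				RTE_PTYPE_L4_MASK, ptypes, num);
 *		for (i = 0; i < num; i++)
 *			printf("ptype 0x%08x supported\n", ptypes[i]);
 *	}
 */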
2096
2097 void
2098 rte_eth_macaddr_get(uint16_t port_id, struct ether_addr *mac_addr)
2099 {
2100         struct rte_eth_dev *dev;
2101
2102         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2103         dev = &rte_eth_devices[port_id];
2104         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
2105 }
2106
2107
2108 int
2109 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
2110 {
2111         struct rte_eth_dev *dev;
2112
2113         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2114
2115         dev = &rte_eth_devices[port_id];
2116         *mtu = dev->data->mtu;
2117         return 0;
2118 }
2119
2120 int
2121 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
2122 {
2123         int ret;
2124         struct rte_eth_dev *dev;
2125
2126         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2127         dev = &rte_eth_devices[port_id];
2128         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
2129
2130         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
2131         if (!ret)
2132                 dev->data->mtu = mtu;
2133
2134         return ret;
2135 }
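
/*
 * Usage sketch (editorial illustration): the cached MTU is only updated on
 * success, so reading it back is a cheap sanity check. The 1500-byte value
 * and the helper name are assumptions for the example.
 *
 *	static int
 *	example_set_mtu(uint16_t port_id)
 *	{
 *		uint16_t mtu = 0;
 *		int ret = rte_eth_dev_set_mtu(port_id, 1500);
 *
 *		if (ret == 0)
 *			ret = rte_eth_dev_get_mtu(port_id, &mtu);
 *		return ret;
 *	}
 */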
2136
2137 int
2138 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
2139 {
2140         struct rte_eth_dev *dev;
2141         int ret;
2142
2143         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2144         dev = &rte_eth_devices[port_id];
2145         if (!(dev->data->dev_conf.rxmode.offloads &
2146               DEV_RX_OFFLOAD_VLAN_FILTER)) {
2147                 RTE_PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
2148                 return -ENOSYS;
2149         }
2150
2151         if (vlan_id > 4095) {
2152                 RTE_PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
2153                                 port_id, (unsigned) vlan_id);
2154                 return -EINVAL;
2155         }
2156         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
2157
2158         ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
2159         if (ret == 0) {
2160                 struct rte_vlan_filter_conf *vfc;
2161                 int vidx;
2162                 int vbit;
2163
2164                 vfc = &dev->data->vlan_filter_conf;
2165                 vidx = vlan_id / 64;
2166                 vbit = vlan_id % 64;
2167
2168                 if (on)
2169                         vfc->ids[vidx] |= UINT64_C(1) << vbit;
2170                 else
2171                         vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
2172         }
2173
2174         return ret;
2175 }
2176
2177 int
2178 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
2179                                     int on)
2180 {
2181         struct rte_eth_dev *dev;
2182
2183         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2184         dev = &rte_eth_devices[port_id];
2185         if (rx_queue_id >= dev->data->nb_rx_queues) {
2186                 RTE_PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", rx_queue_id);
2187                 return -EINVAL;
2188         }
2189
2190         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
2191         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
2192
2193         return 0;
2194 }
2195
2196 int
2197 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
2198                                 enum rte_vlan_type vlan_type,
2199                                 uint16_t tpid)
2200 {
2201         struct rte_eth_dev *dev;
2202
2203         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2204         dev = &rte_eth_devices[port_id];
2205         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
2206
2207         return (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, tpid);
2208 }
2209
2210 int
2211 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
2212 {
2213         struct rte_eth_dev *dev;
2214         int ret = 0;
2215         int mask = 0;
2216         int cur, org = 0;
2217         uint64_t orig_offloads;
2218
2219         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2220         dev = &rte_eth_devices[port_id];
2221
2222         /* save original values in case of failure */
2223         orig_offloads = dev->data->dev_conf.rxmode.offloads;
2224
2225         /* check which options the application changed */
2226         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
2227         org = !!(dev->data->dev_conf.rxmode.offloads &
2228                  DEV_RX_OFFLOAD_VLAN_STRIP);
2229         if (cur != org) {
2230                 if (cur)
2231                         dev->data->dev_conf.rxmode.offloads |=
2232                                 DEV_RX_OFFLOAD_VLAN_STRIP;
2233                 else
2234                         dev->data->dev_conf.rxmode.offloads &=
2235                                 ~DEV_RX_OFFLOAD_VLAN_STRIP;
2236                 mask |= ETH_VLAN_STRIP_MASK;
2237         }
2238
2239         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
2240         org = !!(dev->data->dev_conf.rxmode.offloads &
2241                  DEV_RX_OFFLOAD_VLAN_FILTER);
2242         if (cur != org) {
2243                 if (cur)
2244                         dev->data->dev_conf.rxmode.offloads |=
2245                                 DEV_RX_OFFLOAD_VLAN_FILTER;
2246                 else
2247                         dev->data->dev_conf.rxmode.offloads &=
2248                                 ~DEV_RX_OFFLOAD_VLAN_FILTER;
2249                 mask |= ETH_VLAN_FILTER_MASK;
2250         }
2251
2252         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
2253         org = !!(dev->data->dev_conf.rxmode.offloads &
2254                  DEV_RX_OFFLOAD_VLAN_EXTEND);
2255         if (cur != org) {
2256                 if (cur)
2257                         dev->data->dev_conf.rxmode.offloads |=
2258                                 DEV_RX_OFFLOAD_VLAN_EXTEND;
2259                 else
2260                         dev->data->dev_conf.rxmode.offloads &=
2261                                 ~DEV_RX_OFFLOAD_VLAN_EXTEND;
2262                 mask |= ETH_VLAN_EXTEND_MASK;
2263         }
2264
2265         /* no change */
2266         if (mask == 0)
2267                 return ret;
2268
2269         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
2270
2271         /*
2272          * Convert to the offload bitfield API just in case the underlying PMD
2273          * still supports it.
2274          */
2275         rte_eth_convert_rx_offloads(dev->data->dev_conf.rxmode.offloads,
2276                                     &dev->data->dev_conf.rxmode);
2277         ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
2278         if (ret) {
2279                 /* hit an error, restore the original values */
2280                 dev->data->dev_conf.rxmode.offloads = orig_offloads;
2281                 rte_eth_convert_rx_offloads(dev->data->dev_conf.rxmode.offloads,
2282                                             &dev->data->dev_conf.rxmode);
2283         }
2284
2285         return ret;
2286 }
2287
2288 int
2289 rte_eth_dev_get_vlan_offload(uint16_t port_id)
2290 {
2291         struct rte_eth_dev *dev;
2292         int ret = 0;
2293
2294         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2295         dev = &rte_eth_devices[port_id];
2296
2297         if (dev->data->dev_conf.rxmode.offloads &
2298             DEV_RX_OFFLOAD_VLAN_STRIP)
2299                 ret |= ETH_VLAN_STRIP_OFFLOAD;
2300
2301         if (dev->data->dev_conf.rxmode.offloads &
2302             DEV_RX_OFFLOAD_VLAN_FILTER)
2303                 ret |= ETH_VLAN_FILTER_OFFLOAD;
2304
2305         if (dev->data->dev_conf.rxmode.offloads &
2306             DEV_RX_OFFLOAD_VLAN_EXTEND)
2307                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
2308
2309         return ret;
2310 }
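
/*
 * Usage sketch (editorial illustration): the VLAN offload mask is
 * read-modify-write, so a caller that only wants to enable stripping should
 * preserve the other bits. The helper name is an assumption for the example.
 *
 *	static int
 *	example_enable_vlan_strip(uint16_t port_id)
 *	{
 *		int mask = rte_eth_dev_get_vlan_offload(port_id);
 *
 *		if (mask < 0)
 *			return mask;
 *		return rte_eth_dev_set_vlan_offload(port_id,
 *				mask | ETH_VLAN_STRIP_OFFLOAD);
 *	}
 */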
2311
2312 int
2313 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
2314 {
2315         struct rte_eth_dev *dev;
2316
2317         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2318         dev = &rte_eth_devices[port_id];
2319         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
2320         (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on);
2321
2322         return 0;
2323 }
2324
2325 int
2326 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
2327 {
2328         struct rte_eth_dev *dev;
2329
2330         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2331         dev = &rte_eth_devices[port_id];
2332         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
2333         memset(fc_conf, 0, sizeof(*fc_conf));
2334         return (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf);
2335 }
2336
2337 int
2338 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
2339 {
2340         struct rte_eth_dev *dev;
2341
2342         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2343         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
2344                 RTE_PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
2345                 return -EINVAL;
2346         }
2347
2348         dev = &rte_eth_devices[port_id];
2349         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
2350         return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
2351 }
2352
2353 int
2354 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
2355                                    struct rte_eth_pfc_conf *pfc_conf)
2356 {
2357         struct rte_eth_dev *dev;
2358
2359         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2360         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
2361                 RTE_PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
2362                 return -EINVAL;
2363         }
2364
2365         dev = &rte_eth_devices[port_id];
2366         /* High water, low water validation is device-specific */
2367         if (*dev->dev_ops->priority_flow_ctrl_set)
2368                 return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
2369         return -ENOTSUP;
2370 }
2371
2372 static int
2373 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
2374                         uint16_t reta_size)
2375 {
2376         uint16_t i, num;
2377
2378         if (!reta_conf)
2379                 return -EINVAL;
2380
2381         num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
2382         for (i = 0; i < num; i++) {
2383                 if (reta_conf[i].mask)
2384                         return 0;
2385         }
2386
2387         return -EINVAL;
2388 }
2389
2390 static int
2391 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
2392                          uint16_t reta_size,
2393                          uint16_t max_rxq)
2394 {
2395         uint16_t i, idx, shift;
2396
2397         if (!reta_conf)
2398                 return -EINVAL;
2399
2400         if (max_rxq == 0) {
2401                 RTE_PMD_DEBUG_TRACE("No receive queue is available\n");
2402                 return -EINVAL;
2403         }
2404
2405         for (i = 0; i < reta_size; i++) {
2406                 idx = i / RTE_RETA_GROUP_SIZE;
2407                 shift = i % RTE_RETA_GROUP_SIZE;
2408                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
2409                         (reta_conf[idx].reta[shift] >= max_rxq)) {
2410                         RTE_PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
2411                                 "the maximum rxq index: %u\n", idx, shift,
2412                                 reta_conf[idx].reta[shift], max_rxq);
2413                         return -EINVAL;
2414                 }
2415         }
2416
2417         return 0;
2418 }
2419
2420 int
2421 rte_eth_dev_rss_reta_update(uint16_t port_id,
2422                             struct rte_eth_rss_reta_entry64 *reta_conf,
2423                             uint16_t reta_size)
2424 {
2425         struct rte_eth_dev *dev;
2426         int ret;
2427
2428         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2429         /* Check mask bits */
2430         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2431         if (ret < 0)
2432                 return ret;
2433
2434         dev = &rte_eth_devices[port_id];
2435
2436         /* Check entry value */
2437         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
2438                                 dev->data->nb_rx_queues);
2439         if (ret < 0)
2440                 return ret;
2441
2442         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
2443         return (*dev->dev_ops->reta_update)(dev, reta_conf, reta_size);
2444 }
2445
2446 int
2447 rte_eth_dev_rss_reta_query(uint16_t port_id,
2448                            struct rte_eth_rss_reta_entry64 *reta_conf,
2449                            uint16_t reta_size)
2450 {
2451         struct rte_eth_dev *dev;
2452         int ret;
2453
2454         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2455
2456         /* Check mask bits */
2457         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2458         if (ret < 0)
2459                 return ret;
2460
2461         dev = &rte_eth_devices[port_id];
2462         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
2463         return (*dev->dev_ops->reta_query)(dev, reta_conf, reta_size);
2464 }
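
/*
 * Usage sketch (editorial illustration): each rte_eth_rss_reta_entry64 covers
 * RTE_RETA_GROUP_SIZE entries and its mask selects which of them the update
 * touches. A 128-entry table spread over two RX queues is assumed here, as is
 * the helper name.
 *
 *	static int
 *	example_spread_reta(uint16_t port_id)
 *	{
 *		struct rte_eth_rss_reta_entry64 reta[128 / RTE_RETA_GROUP_SIZE];
 *		unsigned int i, idx, shift;
 *
 *		memset(reta, 0, sizeof(reta));
 *		for (i = 0; i < 128; i++) {
 *			idx = i / RTE_RETA_GROUP_SIZE;
 *			shift = i % RTE_RETA_GROUP_SIZE;
 *			reta[idx].mask |= UINT64_C(1) << shift;
 *			reta[idx].reta[shift] = i % 2;
 *		}
 *		return rte_eth_dev_rss_reta_update(port_id, reta, 128);
 *	}
 */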
2465
2466 int
2467 rte_eth_dev_rss_hash_update(uint16_t port_id,
2468                             struct rte_eth_rss_conf *rss_conf)
2469 {
2470         struct rte_eth_dev *dev;
2471
2472         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2473         dev = &rte_eth_devices[port_id];
2474         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
2475         return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
2476 }
2477
2478 int
2479 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
2480                               struct rte_eth_rss_conf *rss_conf)
2481 {
2482         struct rte_eth_dev *dev;
2483
2484         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2485         dev = &rte_eth_devices[port_id];
2486         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
2487         return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
2488 }
2489
2490 int
2491 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
2492                                 struct rte_eth_udp_tunnel *udp_tunnel)
2493 {
2494         struct rte_eth_dev *dev;
2495
2496         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2497         if (udp_tunnel == NULL) {
2498                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2499                 return -EINVAL;
2500         }
2501
2502         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2503                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
2504                 return -EINVAL;
2505         }
2506
2507         dev = &rte_eth_devices[port_id];
2508         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
2509         return (*dev->dev_ops->udp_tunnel_port_add)(dev, udp_tunnel);
2510 }
2511
2512 int
2513 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
2514                                    struct rte_eth_udp_tunnel *udp_tunnel)
2515 {
2516         struct rte_eth_dev *dev;
2517
2518         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2519         dev = &rte_eth_devices[port_id];
2520
2521         if (udp_tunnel == NULL) {
2522                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2523                 return -EINVAL;
2524         }
2525
2526         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2527                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
2528                 return -EINVAL;
2529         }
2530
2531         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
2532         return (*dev->dev_ops->udp_tunnel_port_del)(dev, udp_tunnel);
2533 }
2534
2535 int
2536 rte_eth_led_on(uint16_t port_id)
2537 {
2538         struct rte_eth_dev *dev;
2539
2540         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2541         dev = &rte_eth_devices[port_id];
2542         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
2543         return (*dev->dev_ops->dev_led_on)(dev);
2544 }
2545
2546 int
2547 rte_eth_led_off(uint16_t port_id)
2548 {
2549         struct rte_eth_dev *dev;
2550
2551         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2552         dev = &rte_eth_devices[port_id];
2553         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
2554         return (*dev->dev_ops->dev_led_off)(dev);
2555 }
2556
2557 /*
2558  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2559  * an empty spot.
2560  */
2561 static int
2562 get_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
2563 {
2564         struct rte_eth_dev_info dev_info;
2565         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2566         unsigned i;
2567
2568         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2569         rte_eth_dev_info_get(port_id, &dev_info);
2570
2571         for (i = 0; i < dev_info.max_mac_addrs; i++)
2572                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
2573                         return i;
2574
2575         return -1;
2576 }
2577
2578 static const struct ether_addr null_mac_addr;
2579
2580 int
2581 rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *addr,
2582                         uint32_t pool)
2583 {
2584         struct rte_eth_dev *dev;
2585         int index;
2586         uint64_t pool_mask;
2587         int ret;
2588
2589         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2590         dev = &rte_eth_devices[port_id];
2591         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
2592
2593         if (is_zero_ether_addr(addr)) {
2594                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2595                         port_id);
2596                 return -EINVAL;
2597         }
2598         if (pool >= ETH_64_POOLS) {
2599                 RTE_PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
2600                 return -EINVAL;
2601         }
2602
2603         index = get_mac_addr_index(port_id, addr);
2604         if (index < 0) {
2605                 index = get_mac_addr_index(port_id, &null_mac_addr);
2606                 if (index < 0) {
2607                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2608                                 port_id);
2609                         return -ENOSPC;
2610                 }
2611         } else {
2612                 pool_mask = dev->data->mac_pool_sel[index];
2613
2614                 /* If both MAC address and pool are already there, do nothing */
2615                 if (pool_mask & (1ULL << pool))
2616                         return 0;
2617         }
2618
2619         /* Update NIC */
2620         ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
2621
2622         if (ret == 0) {
2623                 /* Update address in NIC data structure */
2624                 ether_addr_copy(addr, &dev->data->mac_addrs[index]);
2625
2626                 /* Update pool bitmap in NIC data structure */
2627                 dev->data->mac_pool_sel[index] |= (1ULL << pool);
2628         }
2629
2630         return ret;
2631 }
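
/*
 * Usage sketch (editorial illustration): adding a secondary, locally
 * administered unicast address to pool 0. The hard-coded address bytes and
 * the helper name are assumptions for the example.
 *
 *	static int
 *	example_add_mac(uint16_t port_id)
 *	{
 *		struct ether_addr addr = {
 *			.addr_bytes = {0x02, 0x00, 0x00, 0x00, 0x00, 0x01},
 *		};
 *
 *		return rte_eth_dev_mac_addr_add(port_id, &addr, 0);
 *	}
 */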
2632
2633 int
2634 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *addr)
2635 {
2636         struct rte_eth_dev *dev;
2637         int index;
2638
2639         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2640         dev = &rte_eth_devices[port_id];
2641         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
2642
2643         index = get_mac_addr_index(port_id, addr);
2644         if (index == 0) {
2645                 RTE_PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
2646                 return -EADDRINUSE;
2647         } else if (index < 0)
2648                 return 0;  /* Do nothing if address wasn't found */
2649
2650         /* Update NIC */
2651         (*dev->dev_ops->mac_addr_remove)(dev, index);
2652
2653         /* Update address in NIC data structure */
2654         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
2655
2656         /* reset pool bitmap */
2657         dev->data->mac_pool_sel[index] = 0;
2658
2659         return 0;
2660 }
2661
2662 int
2663 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct ether_addr *addr)
2664 {
2665         struct rte_eth_dev *dev;
2666
2667         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2668
2669         if (!is_valid_assigned_ether_addr(addr))
2670                 return -EINVAL;
2671
2672         dev = &rte_eth_devices[port_id];
2673         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
2674
2675         /* Update default address in NIC data structure */
2676         ether_addr_copy(addr, &dev->data->mac_addrs[0]);
2677
2678         (*dev->dev_ops->mac_addr_set)(dev, addr);
2679
2680         return 0;
2681 }
2682
2683
2684 /*
2685  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2686  * an empty spot.
2687  */
2688 static int
2689 get_hash_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
2690 {
2691         struct rte_eth_dev_info dev_info;
2692         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2693         unsigned i;
2694
2695         rte_eth_dev_info_get(port_id, &dev_info);
2696         if (!dev->data->hash_mac_addrs)
2697                 return -1;
2698
2699         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
2700                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
2701                         ETHER_ADDR_LEN) == 0)
2702                         return i;
2703
2704         return -1;
2705 }
2706
2707 int
2708 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr,
2709                                 uint8_t on)
2710 {
2711         int index;
2712         int ret;
2713         struct rte_eth_dev *dev;
2714
2715         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2716
2717         dev = &rte_eth_devices[port_id];
2718         if (is_zero_ether_addr(addr)) {
2719                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2720                         port_id);
2721                 return -EINVAL;
2722         }
2723
2724         index = get_hash_mac_addr_index(port_id, addr);
2725         /* Check if it's already there, and do nothing */
2726         if ((index >= 0) && on)
2727                 return 0;
2728
2729         if (index < 0) {
2730                 if (!on) {
2731                         RTE_PMD_DEBUG_TRACE("port %d: the MAC address was not "
2732                                 "set in UTA\n", port_id);
2733                         return -EINVAL;
2734                 }
2735
2736                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
2737                 if (index < 0) {
2738                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2739                                         port_id);
2740                         return -ENOSPC;
2741                 }
2742         }
2743
2744         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
2745         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
2746         if (ret == 0) {
2747                 /* Update address in NIC data structure */
2748                 if (on)
2749                         ether_addr_copy(addr,
2750                                         &dev->data->hash_mac_addrs[index]);
2751                 else
2752                         ether_addr_copy(&null_mac_addr,
2753                                         &dev->data->hash_mac_addrs[index]);
2754         }
2755
2756         return ret;
2757 }
2758
2759 int
2760 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
2761 {
2762         struct rte_eth_dev *dev;
2763
2764         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2765
2766         dev = &rte_eth_devices[port_id];
2767
2768         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
2769         return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
2770 }
2771
2772 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
2773                                         uint16_t tx_rate)
2774 {
2775         struct rte_eth_dev *dev;
2776         struct rte_eth_dev_info dev_info;
2777         struct rte_eth_link link;
2778
2779         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2780
2781         dev = &rte_eth_devices[port_id];
2782         rte_eth_dev_info_get(port_id, &dev_info);
2783         link = dev->data->dev_link;
2784
2785         if (queue_idx > dev_info.max_tx_queues) {
2786                 RTE_PMD_DEBUG_TRACE("set queue rate limit:port %d: "
2787                                 "invalid queue id=%d\n", port_id, queue_idx);
2788                 return -EINVAL;
2789         }
2790
2791         if (tx_rate > link.link_speed) {
2792                 RTE_PMD_DEBUG_TRACE("set queue rate limit:invalid tx_rate=%d, "
2793                                 "bigger than link speed= %d\n",
2794                         tx_rate, link.link_speed);
2795                 return -EINVAL;
2796         }
2797
2798         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
2799         return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate);
2800 }
2801
2802 int
2803 rte_eth_mirror_rule_set(uint16_t port_id,
2804                         struct rte_eth_mirror_conf *mirror_conf,
2805                         uint8_t rule_id, uint8_t on)
2806 {
2807         struct rte_eth_dev *dev;
2808
2809         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2810         if (mirror_conf->rule_type == 0) {
2811                 RTE_PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
2812                 return -EINVAL;
2813         }
2814
2815         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
2816                 RTE_PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
2817                                 ETH_64_POOLS - 1);
2818                 return -EINVAL;
2819         }
2820
2821         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
2822              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
2823             (mirror_conf->pool_mask == 0)) {
2824                 RTE_PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not be 0.\n");
2825                 return -EINVAL;
2826         }
2827
2828         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
2829             mirror_conf->vlan.vlan_mask == 0) {
2830                 RTE_PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask can not be 0.\n");
2831                 return -EINVAL;
2832         }
2833
2834         dev = &rte_eth_devices[port_id];
2835         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
2836
2837         return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
2838 }
2839
2840 int
2841 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
2842 {
2843         struct rte_eth_dev *dev;
2844
2845         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2846
2847         dev = &rte_eth_devices[port_id];
2848         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
2849
2850         return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
2851 }
2852
2853 int
2854 rte_eth_dev_callback_register(uint16_t port_id,
2855                         enum rte_eth_event_type event,
2856                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2857 {
2858         struct rte_eth_dev *dev;
2859         struct rte_eth_dev_callback *user_cb;
2860
2861         if (!cb_fn)
2862                 return -EINVAL;
2863
2864         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2865
2866         dev = &rte_eth_devices[port_id];
2867         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2868
2869         TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
2870                 if (user_cb->cb_fn == cb_fn &&
2871                         user_cb->cb_arg == cb_arg &&
2872                         user_cb->event == event) {
2873                         break;
2874                 }
2875         }
2876
2877         /* create a new callback. */
2878         if (user_cb == NULL) {
2879                 user_cb = rte_zmalloc("INTR_USER_CALLBACK",
2880                                         sizeof(struct rte_eth_dev_callback), 0);
2881                 if (user_cb != NULL) {
2882                         user_cb->cb_fn = cb_fn;
2883                         user_cb->cb_arg = cb_arg;
2884                         user_cb->event = event;
2885                         TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
2886                 }
2887         }
2888
2889         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2890         return (user_cb == NULL) ? -ENOMEM : 0;
2891 }
2892
2893 int
2894 rte_eth_dev_callback_unregister(uint16_t port_id,
2895                         enum rte_eth_event_type event,
2896                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2897 {
2898         int ret;
2899         struct rte_eth_dev *dev;
2900         struct rte_eth_dev_callback *cb, *next;
2901
2902         if (!cb_fn)
2903                 return -EINVAL;
2904
2905         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2906
2907         dev = &rte_eth_devices[port_id];
2908         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2909
2910         ret = 0;
2911         for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {
2912
2913                 next = TAILQ_NEXT(cb, next);
2914
2915                 if (cb->cb_fn != cb_fn || cb->event != event ||
2916                                 (cb->cb_arg != (void *)-1 &&
2917                                 cb->cb_arg != cb_arg))
2918                         continue;
2919
2920                 /*
2921                  * if this callback is not executing right now,
2922                  * then remove it.
2923                  */
2924                 if (cb->active == 0) {
2925                         TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
2926                         rte_free(cb);
2927                 } else {
2928                         ret = -EAGAIN;
2929                 }
2930         }
2931
2932         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2933         return ret;
2934 }
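
/*
 * Usage sketch (editorial illustration): registering a link-state-change
 * handler. The handler prototype mirrors the way callbacks are invoked by
 * _rte_eth_dev_callback_process() below; RTE_ETH_EVENT_INTR_LSC and the
 * function names are assumptions for the example.
 *
 *	static int
 *	example_on_lsc(uint16_t port_id, enum rte_eth_event_type event,
 *			void *cb_arg, void *ret_param)
 *	{
 *		RTE_SET_USED(cb_arg);
 *		RTE_SET_USED(ret_param);
 *		printf("port %u: event %d\n", port_id, (int)event);
 *		return 0;
 *	}
 *
 *	static int
 *	example_enable_lsc(uint16_t port_id)
 *	{
 *		return rte_eth_dev_callback_register(port_id,
 *				RTE_ETH_EVENT_INTR_LSC, example_on_lsc, NULL);
 *	}
 */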
2935
2936 int
2937 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
2938         enum rte_eth_event_type event, void *cb_arg, void *ret_param)
2939 {
2940         struct rte_eth_dev_callback *cb_lst;
2941         struct rte_eth_dev_callback dev_cb;
2942         int rc = 0;
2943
2944         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2945         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
2946                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
2947                         continue;
2948                 dev_cb = *cb_lst;
2949                 cb_lst->active = 1;
2950                 if (cb_arg != NULL)
2951                         dev_cb.cb_arg = cb_arg;
2952                 if (ret_param != NULL)
2953                         dev_cb.ret_param = ret_param;
2954
2955                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2956                 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
2957                                 dev_cb.cb_arg, dev_cb.ret_param);
2958                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
2959                 cb_lst->active = 0;
2960         }
2961         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2962         return rc;
2963 }
2964
2965 int
2966 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
2967 {
2968         uint32_t vec;
2969         struct rte_eth_dev *dev;
2970         struct rte_intr_handle *intr_handle;
2971         uint16_t qid;
2972         int rc;
2973
2974         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2975
2976         dev = &rte_eth_devices[port_id];
2977
2978         if (!dev->intr_handle) {
2979                 RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
2980                 return -ENOTSUP;
2981         }
2982
2983         intr_handle = dev->intr_handle;
2984         if (!intr_handle->intr_vec) {
2985                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
2986                 return -EPERM;
2987         }
2988
2989         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
2990                 vec = intr_handle->intr_vec[qid];
2991                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
2992                 if (rc && rc != -EEXIST) {
2993                         RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
2994                                         " op %d epfd %d vec %u\n",
2995                                         port_id, qid, op, epfd, vec);
2996                 }
2997         }
2998
2999         return 0;
3000 }
3001
3002 const struct rte_memzone *
3003 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
3004                          uint16_t queue_id, size_t size, unsigned align,
3005                          int socket_id)
3006 {
3007         char z_name[RTE_MEMZONE_NAMESIZE];
3008         const struct rte_memzone *mz;
3009
3010         snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
3011                  dev->device->driver->name, ring_name,
3012                  dev->data->port_id, queue_id);
3013
3014         mz = rte_memzone_lookup(z_name);
3015         if (mz)
3016                 return mz;
3017
3018         return rte_memzone_reserve_aligned(z_name, size, socket_id, 0, align);
3019 }
3020
3021 int
3022 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
3023                           int epfd, int op, void *data)
3024 {
3025         uint32_t vec;
3026         struct rte_eth_dev *dev;
3027         struct rte_intr_handle *intr_handle;
3028         int rc;
3029
3030         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3031
3032         dev = &rte_eth_devices[port_id];
3033         if (queue_id >= dev->data->nb_rx_queues) {
3034                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
3035                 return -EINVAL;
3036         }
3037
3038         if (!dev->intr_handle) {
3039                 RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
3040                 return -ENOTSUP;
3041         }
3042
3043         intr_handle = dev->intr_handle;
3044         if (!intr_handle->intr_vec) {
3045                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
3046                 return -EPERM;
3047         }
3048
3049         vec = intr_handle->intr_vec[queue_id];
3050         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
3051         if (rc && rc != -EEXIST) {
3052                 RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
3053                                 " op %d epfd %d vec %u\n",
3054                                 port_id, queue_id, op, epfd, vec);
3055                 return rc;
3056         }
3057
3058         return 0;
3059 }
3060
3061 int
3062 rte_eth_dev_rx_intr_enable(uint16_t port_id,
3063                            uint16_t queue_id)
3064 {
3065         struct rte_eth_dev *dev;
3066
3067         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3068
3069         dev = &rte_eth_devices[port_id];
3070
3071         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
3072         return (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id);
3073 }
3074
3075 int
3076 rte_eth_dev_rx_intr_disable(uint16_t port_id,
3077                             uint16_t queue_id)
3078 {
3079         struct rte_eth_dev *dev;
3080
3081         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3082
3083         dev = &rte_eth_devices[port_id];
3084
3085         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
3086         return (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id);
3087 }
3088
3089
3090 int
3091 rte_eth_dev_filter_supported(uint16_t port_id,
3092                              enum rte_filter_type filter_type)
3093 {
3094         struct rte_eth_dev *dev;
3095
3096         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3097
3098         dev = &rte_eth_devices[port_id];
3099         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3100         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3101                                 RTE_ETH_FILTER_NOP, NULL);
3102 }
3103
3104 int
3105 rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
3106                        enum rte_filter_op filter_op, void *arg)
3107 {
3108         struct rte_eth_dev *dev;
3109
3110         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3111
3112         dev = &rte_eth_devices[port_id];
3113         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3114         return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op, arg);
3115 }
3116
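/*
 * A minimal sketch of probing for a filter type before programming it; the
 * ntuple filter contents are left out because they depend entirely on the
 * application's match criteria:
 *
 *	if (rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_NTUPLE) == 0) {
 *		struct rte_eth_ntuple_filter filter;
 *
 *		memset(&filter, 0, sizeof(filter));
 *		... fill in the match fields and queue ...
 *		rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NTUPLE,
 *					RTE_ETH_FILTER_ADD, &filter);
 *	}
 */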
3117 void *
3118 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
3119                 rte_rx_callback_fn fn, void *user_param)
3120 {
3121 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3122         rte_errno = ENOTSUP;
3123         return NULL;
3124 #endif
3125         /* check input parameters */
3126         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3127                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3128                 rte_errno = EINVAL;
3129                 return NULL;
3130         }
3131         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3132
3133         if (cb == NULL) {
3134                 rte_errno = ENOMEM;
3135                 return NULL;
3136         }
3137
3138         cb->fn.rx = fn;
3139         cb->param = user_param;
3140
3141         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3142         /* Add the callbacks in fifo order. */
3143         struct rte_eth_rxtx_callback *tail =
3144                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3145
3146         if (!tail) {
3147                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3148
3149         } else {
3150                 while (tail->next)
3151                         tail = tail->next;
3152                 tail->next = cb;
3153         }
3154         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3155
3156         return cb;
3157 }
3158
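/*
 * A minimal sketch of a post-Rx callback that only counts received packets.
 * The callback runs in the rte_eth_rx_burst() context of the polling lcore,
 * so the plain uint64_t counter below assumes one polling lcore per queue:
 *
 *	static uint16_t
 *	count_rx(uint16_t port, uint16_t queue, struct rte_mbuf *pkts[],
 *		 uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
 *	{
 *		*(uint64_t *)user_param += nb_pkts;
 *		RTE_SET_USED(port);
 *		RTE_SET_USED(queue);
 *		RTE_SET_USED(pkts);
 *		RTE_SET_USED(max_pkts);
 *		return nb_pkts;
 *	}
 *
 *	cb = rte_eth_add_rx_callback(port_id, queue_id, count_rx, &rx_count);
 */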
3159 void *
3160 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
3161                 rte_rx_callback_fn fn, void *user_param)
3162 {
3163 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3164         rte_errno = ENOTSUP;
3165         return NULL;
3166 #endif
3167         /* check input parameters */
3168         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3169                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3170                 rte_errno = EINVAL;
3171                 return NULL;
3172         }
3173
3174         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3175
3176         if (cb == NULL) {
3177                 rte_errno = ENOMEM;
3178                 return NULL;
3179         }
3180
3181         cb->fn.rx = fn;
3182         cb->param = user_param;
3183
3184         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3185         /* Add the callbacks at first position. */
3186         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3187         rte_smp_wmb();
3188         rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3189         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3190
3191         return cb;
3192 }
3193
3194 void *
3195 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
3196                 rte_tx_callback_fn fn, void *user_param)
3197 {
3198 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3199         rte_errno = ENOTSUP;
3200         return NULL;
3201 #endif
3202         /* check input parameters */
3203         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3204                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
3205                 rte_errno = EINVAL;
3206                 return NULL;
3207         }
3208
3209         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3210
3211         if (cb == NULL) {
3212                 rte_errno = ENOMEM;
3213                 return NULL;
3214         }
3215
3216         cb->fn.tx = fn;
3217         cb->param = user_param;
3218
3219         rte_spinlock_lock(&rte_eth_tx_cb_lock);
3220         /* Add the callbacks in fifo order. */
3221         struct rte_eth_rxtx_callback *tail =
3222                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
3223
3224         if (!tail) {
3225                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
3226
3227         } else {
3228                 while (tail->next)
3229                         tail = tail->next;
3230                 tail->next = cb;
3231         }
3232         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3233
3234         return cb;
3235 }
3236
3237 int
3238 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
3239                 struct rte_eth_rxtx_callback *user_cb)
3240 {
3241 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3242         return -ENOTSUP;
3243 #endif
3244         /* Check input parameters. */
3245         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3246         if (user_cb == NULL ||
3247                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
3248                 return -EINVAL;
3249
3250         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3251         struct rte_eth_rxtx_callback *cb;
3252         struct rte_eth_rxtx_callback **prev_cb;
3253         int ret = -EINVAL;
3254
3255         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3256         prev_cb = &dev->post_rx_burst_cbs[queue_id];
3257         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3258                 cb = *prev_cb;
3259                 if (cb == user_cb) {
3260                         /* Remove the user cb from the callback list. */
3261                         *prev_cb = cb->next;
3262                         ret = 0;
3263                         break;
3264                 }
3265         }
3266         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3267
3268         return ret;
3269 }
3270
3271 int
3272 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
3273                 struct rte_eth_rxtx_callback *user_cb)
3274 {
3275 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3276         return -ENOTSUP;
3277 #endif
3278         /* Check input parameters. */
3279         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3280         if (user_cb == NULL ||
3281                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
3282                 return -EINVAL;
3283
3284         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3285         int ret = -EINVAL;
3286         struct rte_eth_rxtx_callback *cb;
3287         struct rte_eth_rxtx_callback **prev_cb;
3288
3289         rte_spinlock_lock(&rte_eth_tx_cb_lock);
3290         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
3291         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3292                 cb = *prev_cb;
3293                 if (cb == user_cb) {
3294                         /* Remove the user cb from the callback list. */
3295                         *prev_cb = cb->next;
3296                         ret = 0;
3297                         break;
3298                 }
3299         }
3300         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3301
3302         return ret;
3303 }
3304
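/*
 * Removing a callback only unlinks it from the per-queue list; the memory is
 * not released here because a data-path thread may still be executing it.
 * A minimal teardown sketch, assuming the application can tell when no
 * rx/tx burst call is still in flight on the queue:
 *
 *	rte_eth_remove_rx_callback(port_id, queue_id, cb);
 *	... wait until every data-path lcore has finished its current burst
 *	    on this queue (e.g. stop polling and synchronize) ...
 *	rte_free(cb);
 */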
3305 int
3306 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3307         struct rte_eth_rxq_info *qinfo)
3308 {
3309         struct rte_eth_dev *dev;
3310
3311         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3312
3313         if (qinfo == NULL)
3314                 return -EINVAL;
3315
3316         dev = &rte_eth_devices[port_id];
3317         if (queue_id >= dev->data->nb_rx_queues) {
3318                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
3319                 return -EINVAL;
3320         }
3321
3322         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
3323
3324         memset(qinfo, 0, sizeof(*qinfo));
3325         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
3326         return 0;
3327 }
3328
3329 int
3330 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3331         struct rte_eth_txq_info *qinfo)
3332 {
3333         struct rte_eth_dev *dev;
3334
3335         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3336
3337         if (qinfo == NULL)
3338                 return -EINVAL;
3339
3340         dev = &rte_eth_devices[port_id];
3341         if (queue_id >= dev->data->nb_tx_queues) {
3342                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%u\n", queue_id);
3343                 return -EINVAL;
3344         }
3345
3346         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
3347
3348         memset(qinfo, 0, sizeof(*qinfo));
3349         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
3350         return 0;
3351 }
3352
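/*
 * A minimal sketch of querying an Rx queue's runtime configuration; every
 * field printed below comes straight from struct rte_eth_rxq_info:
 *
 *	struct rte_eth_rxq_info qinfo;
 *
 *	if (rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo) == 0)
 *		printf("queue %u: %u descriptors, mempool %s, scattered %u\n",
 *		       queue_id, qinfo.nb_desc, qinfo.mp->name,
 *		       qinfo.scattered_rx);
 */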
3353 int
3354 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
3355                              struct ether_addr *mc_addr_set,
3356                              uint32_t nb_mc_addr)
3357 {
3358         struct rte_eth_dev *dev;
3359
3360         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3361
3362         dev = &rte_eth_devices[port_id];
3363         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
3364         return dev->dev_ops->set_mc_addr_list(dev, mc_addr_set, nb_mc_addr);
3365 }
3366
3367 int
3368 rte_eth_timesync_enable(uint16_t port_id)
3369 {
3370         struct rte_eth_dev *dev;
3371
3372         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3373         dev = &rte_eth_devices[port_id];
3374
3375         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
3376         return (*dev->dev_ops->timesync_enable)(dev);
3377 }
3378
3379 int
3380 rte_eth_timesync_disable(uint16_t port_id)
3381 {
3382         struct rte_eth_dev *dev;
3383
3384         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3385         dev = &rte_eth_devices[port_id];
3386
3387         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
3388         return (*dev->dev_ops->timesync_disable)(dev);
3389 }
3390
3391 int
3392 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
3393                                    uint32_t flags)
3394 {
3395         struct rte_eth_dev *dev;
3396
3397         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3398         dev = &rte_eth_devices[port_id];
3399
3400         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
3401         return (*dev->dev_ops->timesync_read_rx_timestamp)(dev, timestamp, flags);
3402 }
3403
3404 int
3405 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
3406                                    struct timespec *timestamp)
3407 {
3408         struct rte_eth_dev *dev;
3409
3410         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3411         dev = &rte_eth_devices[port_id];
3412
3413         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
3414         return (*dev->dev_ops->timesync_read_tx_timestamp)(dev, timestamp);
3415 }
3416
3417 int
3418 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
3419 {
3420         struct rte_eth_dev *dev;
3421
3422         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3423         dev = &rte_eth_devices[port_id];
3424
3425         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
3426         return (*dev->dev_ops->timesync_adjust_time)(dev, delta);
3427 }
3428
3429 int
3430 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
3431 {
3432         struct rte_eth_dev *dev;
3433
3434         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3435         dev = &rte_eth_devices[port_id];
3436
3437         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
3438         return (*dev->dev_ops->timesync_read_time)(dev, timestamp);
3439 }
3440
3441 int
3442 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
3443 {
3444         struct rte_eth_dev *dev;
3445
3446         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3447         dev = &rte_eth_devices[port_id];
3448
3449         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
3450         return (*dev->dev_ops->timesync_write_time)(dev, timestamp);
3451 }
3452
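/*
 * A minimal sketch of the IEEE 1588 helpers above: enable timestamping, read
 * the Rx timestamp after a PTP frame arrived, then slew the device clock.
 * The flags argument is device specific (some PMDs use it to select the Rx
 * timestamp register) and delta_ns is an application-computed offset:
 *
 *	struct timespec ts;
 *
 *	rte_eth_timesync_enable(port_id);
 *	if (rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0) == 0)
 *		rte_eth_timesync_adjust_time(port_id, delta_ns);
 *	rte_eth_timesync_read_time(port_id, &ts);
 */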
3453 int
3454 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
3455 {
3456         struct rte_eth_dev *dev;
3457
3458         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3459
3460         dev = &rte_eth_devices[port_id];
3461         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
3462         return (*dev->dev_ops->get_reg)(dev, info);
3463 }
3464
3465 int
3466 rte_eth_dev_get_eeprom_length(uint16_t port_id)
3467 {
3468         struct rte_eth_dev *dev;
3469
3470         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3471
3472         dev = &rte_eth_devices[port_id];
3473         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
3474         return (*dev->dev_ops->get_eeprom_length)(dev);
3475 }
3476
3477 int
3478 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
3479 {
3480         struct rte_eth_dev *dev;
3481
3482         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3483
3484         dev = &rte_eth_devices[port_id];
3485         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
3486         return (*dev->dev_ops->get_eeprom)(dev, info);
3487 }
3488
3489 int
3490 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
3491 {
3492         struct rte_eth_dev *dev;
3493
3494         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3495
3496         dev = &rte_eth_devices[port_id];
3497         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
3498         return (*dev->dev_ops->set_eeprom)(dev, info);
3499 }
3500
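/*
 * A minimal sketch of reading back a port's EEPROM contents; buffer
 * management and use of the "magic" field are left to the application,
 * and the names below are illustrative only:
 *
 *	int len = rte_eth_dev_get_eeprom_length(port_id);
 *
 *	if (len > 0) {
 *		struct rte_dev_eeprom_info info = {
 *			.data = malloc(len),
 *			.offset = 0,
 *			.length = (uint32_t)len,
 *		};
 *
 *		if (info.data != NULL)
 *			rte_eth_dev_get_eeprom(port_id, &info);
 *	}
 */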
3501 int
3502 rte_eth_dev_get_dcb_info(uint16_t port_id,
3503                              struct rte_eth_dcb_info *dcb_info)
3504 {
3505         struct rte_eth_dev *dev;
3506
3507         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3508
3509         dev = &rte_eth_devices[port_id];
3510         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
3511
3512         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
3513         return (*dev->dev_ops->get_dcb_info)(dev, dcb_info);
3514 }
3515
3516 int
3517 rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
3518                                     struct rte_eth_l2_tunnel_conf *l2_tunnel)
3519 {
3520         struct rte_eth_dev *dev;
3521
3522         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3523         if (l2_tunnel == NULL) {
3524                 RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
3525                 return -EINVAL;
3526         }
3527
3528         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
3529                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
3530                 return -EINVAL;
3531         }
3532
3533         dev = &rte_eth_devices[port_id];
3534         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
3535                                 -ENOTSUP);
3536         return (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev, l2_tunnel);
3537 }
3538
3539 int
3540 rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
3541                                   struct rte_eth_l2_tunnel_conf *l2_tunnel,
3542                                   uint32_t mask,
3543                                   uint8_t en)
3544 {
3545         struct rte_eth_dev *dev;
3546
3547         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3548
3549         if (l2_tunnel == NULL) {
3550                 RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
3551                 return -EINVAL;
3552         }
3553
3554         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
3555                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type.\n");
3556                 return -EINVAL;
3557         }
3558
3559         if (mask == 0) {
3560                 RTE_PMD_DEBUG_TRACE("Mask should have a value.\n");
3561                 return -EINVAL;
3562         }
3563
3564         dev = &rte_eth_devices[port_id];
3565         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
3566                                 -ENOTSUP);
3567         return (*dev->dev_ops->l2_tunnel_offload_set)(dev, l2_tunnel, mask, en);
3568 }
3569
3570 static void
3571 rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc,
3572                            const struct rte_eth_desc_lim *desc_lim)
3573 {
3574         if (desc_lim->nb_align != 0)
3575                 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
3576
3577         if (desc_lim->nb_max != 0)
3578                 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
3579
3580         *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
3581 }
3582
3583 int
3584 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
3585                                  uint16_t *nb_rx_desc,
3586                                  uint16_t *nb_tx_desc)
3587 {
3588         struct rte_eth_dev *dev;
3589         struct rte_eth_dev_info dev_info;
3590
3591         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3592
3593         dev = &rte_eth_devices[port_id];
3594         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
3595
3596         rte_eth_dev_info_get(port_id, &dev_info);
3597
3598         if (nb_rx_desc != NULL)
3599                 rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
3600
3601         if (nb_tx_desc != NULL)
3602                 rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
3603
3604         return 0;
3605 }
3606
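/*
 * A minimal sketch of the intended call order: let the helper clamp the
 * requested ring sizes to the PMD's limits before the queues are set up.
 * The initial sizes and the mempool are illustrative; passing NULL for the
 * queue configuration selects the driver defaults:
 *
 *	uint16_t nb_rxd = 1024, nb_txd = 1024;
 *
 *	rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
 *	rte_eth_rx_queue_setup(port_id, 0, nb_rxd, socket_id, NULL, mb_pool);
 *	rte_eth_tx_queue_setup(port_id, 0, nb_txd, socket_id, NULL);
 */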
3607 int
3608 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
3609 {
3610         struct rte_eth_dev *dev;
3611
3612         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3613
3614         if (pool == NULL)
3615                 return -EINVAL;
3616
3617         dev = &rte_eth_devices[port_id];
3618
3619         if (*dev->dev_ops->pool_ops_supported == NULL)
3620                 return 1; /* all pools are supported */
3621
3622         return (*dev->dev_ops->pool_ops_supported)(dev, pool);
3623 }