[dpdk.git] / lib / librte_ether / rte_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4
5 #include <sys/types.h>
6 #include <sys/queue.h>
7 #include <ctype.h>
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <string.h>
11 #include <stdarg.h>
12 #include <errno.h>
13 #include <stdint.h>
14 #include <inttypes.h>
15 #include <netinet/in.h>
16
17 #include <rte_byteorder.h>
18 #include <rte_log.h>
19 #include <rte_debug.h>
20 #include <rte_interrupts.h>
21 #include <rte_memory.h>
22 #include <rte_memcpy.h>
23 #include <rte_memzone.h>
24 #include <rte_launch.h>
25 #include <rte_eal.h>
26 #include <rte_per_lcore.h>
27 #include <rte_lcore.h>
28 #include <rte_atomic.h>
29 #include <rte_branch_prediction.h>
30 #include <rte_common.h>
31 #include <rte_mempool.h>
32 #include <rte_malloc.h>
33 #include <rte_mbuf.h>
34 #include <rte_errno.h>
35 #include <rte_spinlock.h>
36 #include <rte_string_fns.h>
37
38 #include "rte_ether.h"
39 #include "rte_ethdev.h"
40 #include "ethdev_profile.h"
41
42 static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
43 struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
44 static struct rte_eth_dev_data *rte_eth_dev_data;
45 static uint8_t eth_dev_last_created_port;
46
47 /* spinlock for eth device callbacks */
48 static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
49
50 /* spinlock for add/remove rx callbacks */
51 static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;
52
53 /* spinlock for add/remove tx callbacks */
54 static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;
55
56 /* store statistics names and their offsets in the stats structure */
57 struct rte_eth_xstats_name_off {
58         char name[RTE_ETH_XSTATS_NAME_SIZE];
59         unsigned offset;
60 };
61
62 static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
63         {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
64         {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
65         {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
66         {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
67         {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
68         {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
69         {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
70         {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
71                 rx_nombuf)},
72 };
73
74 #define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))
75
76 static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
77         {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
78         {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
79         {"errors", offsetof(struct rte_eth_stats, q_errors)},
80 };
81
82 #define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
83                 sizeof(rte_rxq_stats_strings[0]))
84
85 static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
86         {"packets", offsetof(struct rte_eth_stats, q_opackets)},
87         {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
88 };
89 #define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
90                 sizeof(rte_txq_stats_strings[0]))
91
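/*
 * Illustrative sketch (an assumption, not code from this section): the
 * name/offset tables above let the basic stats be read generically from a
 * struct rte_eth_stats, roughly as:
 *
 *	uint64_t val = *(uint64_t *)(((char *)&eth_stats) +
 *				     rte_stats_strings[i].offset);
 *
 * Per-queue entries add "queue_idx * sizeof(uint64_t)" to the stored offset.
 */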
92
93 /**
94  * The user application callback description.
95  *
96  * It contains the callback address to be registered by the user
97  * application, the pointer to the callback parameters, and the event type.
98  */
99 struct rte_eth_dev_callback {
100         TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
101         rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
102         void *cb_arg;                           /**< Parameter for callback */
103         void *ret_param;                        /**< Return parameter */
104         enum rte_eth_event_type event;          /**< Interrupt event type */
105         uint32_t active;                        /**< Callback is executing */
106 };
107
108 enum {
109         STAT_QMAP_TX = 0,
110         STAT_QMAP_RX
111 };
112
113 uint16_t
114 rte_eth_find_next(uint16_t port_id)
115 {
116         while (port_id < RTE_MAX_ETHPORTS &&
117                rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED)
118                 port_id++;
119
120         if (port_id >= RTE_MAX_ETHPORTS)
121                 return RTE_MAX_ETHPORTS;
122
123         return port_id;
124 }
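
/*
 * Usage sketch (for illustration only): rte_eth_find_next() is the building
 * block behind the RTE_ETH_FOREACH_DEV() iteration used further down in this
 * file; a plain loop over attached ports would be:
 *
 *	uint16_t pid;
 *
 *	for (pid = rte_eth_find_next(0); pid < RTE_MAX_ETHPORTS;
 *	     pid = rte_eth_find_next(pid + 1))
 *		printf("port %d is attached\n", pid);
 */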
125
126 static void
127 rte_eth_dev_data_alloc(void)
128 {
129         const unsigned flags = 0;
130         const struct rte_memzone *mz;
131
132         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
133                 mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
134                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
135                                 rte_socket_id(), flags);
136         } else
137                 mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
138         if (mz == NULL)
139                 rte_panic("Cannot allocate memzone for ethernet port data\n");
140
141         rte_eth_dev_data = mz->addr;
142         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
143                 memset(rte_eth_dev_data, 0,
144                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
145 }
146
147 struct rte_eth_dev *
148 rte_eth_dev_allocated(const char *name)
149 {
150         unsigned i;
151
152         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
153                 if ((rte_eth_devices[i].state == RTE_ETH_DEV_ATTACHED) &&
154                     strcmp(rte_eth_devices[i].data->name, name) == 0)
155                         return &rte_eth_devices[i];
156         }
157         return NULL;
158 }
159
160 static uint16_t
161 rte_eth_dev_find_free_port(void)
162 {
163         unsigned i;
164
165         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
166                 if (rte_eth_devices[i].state == RTE_ETH_DEV_UNUSED)
167                         return i;
168         }
169         return RTE_MAX_ETHPORTS;
170 }
171
172 static struct rte_eth_dev *
173 eth_dev_get(uint16_t port_id)
174 {
175         struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
176
177         eth_dev->data = &rte_eth_dev_data[port_id];
178         eth_dev->state = RTE_ETH_DEV_ATTACHED;
179         TAILQ_INIT(&(eth_dev->link_intr_cbs));
180
181         eth_dev_last_created_port = port_id;
182
183         return eth_dev;
184 }
185
186 struct rte_eth_dev *
187 rte_eth_dev_allocate(const char *name)
188 {
189         uint16_t port_id;
190         struct rte_eth_dev *eth_dev;
191
192         port_id = rte_eth_dev_find_free_port();
193         if (port_id == RTE_MAX_ETHPORTS) {
194                 RTE_PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
195                 return NULL;
196         }
197
198         if (rte_eth_dev_data == NULL)
199                 rte_eth_dev_data_alloc();
200
201         if (rte_eth_dev_allocated(name) != NULL) {
202                 RTE_PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n",
203                                 name);
204                 return NULL;
205         }
206
207         memset(&rte_eth_dev_data[port_id], 0, sizeof(struct rte_eth_dev_data));
208         eth_dev = eth_dev_get(port_id);
209         snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
210         eth_dev->data->port_id = port_id;
211         eth_dev->data->mtu = ETHER_MTU;
212
213         return eth_dev;
214 }
215
216 /*
217  * Attach to a port already registered by the primary process, which
218  * makes sure that the same device gets the same port id in both
219  * the primary and the secondary process.
220  */
221 struct rte_eth_dev *
222 rte_eth_dev_attach_secondary(const char *name)
223 {
224         uint16_t i;
225         struct rte_eth_dev *eth_dev;
226
227         if (rte_eth_dev_data == NULL)
228                 rte_eth_dev_data_alloc();
229
230         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
231                 if (strcmp(rte_eth_dev_data[i].name, name) == 0)
232                         break;
233         }
234         if (i == RTE_MAX_ETHPORTS) {
235                 RTE_PMD_DEBUG_TRACE(
236                         "device %s is not driven by the primary process\n",
237                         name);
238                 return NULL;
239         }
240
241         eth_dev = eth_dev_get(i);
242         RTE_ASSERT(eth_dev->data->port_id == i);
243
244         return eth_dev;
245 }
246
247 int
248 rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
249 {
250         if (eth_dev == NULL)
251                 return -EINVAL;
252
253         eth_dev->state = RTE_ETH_DEV_UNUSED;
254         return 0;
255 }
256
257 int
258 rte_eth_dev_is_valid_port(uint16_t port_id)
259 {
260         if (port_id >= RTE_MAX_ETHPORTS ||
261             (rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
262              rte_eth_devices[port_id].state != RTE_ETH_DEV_DEFERRED))
263                 return 0;
264         else
265                 return 1;
266 }
267
268 int
269 rte_eth_dev_socket_id(uint16_t port_id)
270 {
271         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
272         return rte_eth_devices[port_id].data->numa_node;
273 }
274
275 void *
276 rte_eth_dev_get_sec_ctx(uint8_t port_id)
277 {
278         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
279         return rte_eth_devices[port_id].security_ctx;
280 }
281
282 uint16_t
283 rte_eth_dev_count(void)
284 {
285         uint16_t p;
286         uint16_t count;
287
288         count = 0;
289
290         RTE_ETH_FOREACH_DEV(p)
291                 count++;
292
293         return count;
294 }
295
296 int
297 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
298 {
299         char *tmp;
300
301         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
302
303         if (name == NULL) {
304                 RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
305                 return -EINVAL;
306         }
307
308         /* shouldn't check 'rte_eth_devices[port_id].data',
309          * because it might be overwritten by a VDEV PMD */
310         tmp = rte_eth_dev_data[port_id].name;
311         strcpy(name, tmp);
312         return 0;
313 }
314
315 int
316 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
317 {
318         int i;
319
320         if (name == NULL) {
321                 RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
322                 return -EINVAL;
323         }
324
325         RTE_ETH_FOREACH_DEV(i) {
326                 if (!strncmp(name,
327                         rte_eth_dev_data[i].name, strlen(name))) {
328
329                         *port_id = i;
330
331                         return 0;
332                 }
333         }
334         return -ENODEV;
335 }
336
337 /* attach the new device, then store port_id of the device */
338 int
339 rte_eth_dev_attach(const char *devargs, uint16_t *port_id)
340 {
341         int ret = -1;
342         int current = rte_eth_dev_count();
343         char *name = NULL;
344         char *args = NULL;
345
346         if ((devargs == NULL) || (port_id == NULL)) {
347                 ret = -EINVAL;
348                 goto err;
349         }
350
351         /* parse devargs, then retrieve device name and args */
352         if (rte_eal_parse_devargs_str(devargs, &name, &args))
353                 goto err;
354
355         ret = rte_eal_dev_attach(name, args);
356         if (ret < 0)
357                 goto err;
358
359         /* no point looking at the port count if no port exists */
360         if (!rte_eth_dev_count()) {
361                 RTE_LOG(ERR, EAL, "No port found for device (%s)\n", name);
362                 ret = -1;
363                 goto err;
364         }
365
366         /* if nothing happened, there is a bug here, since some driver told us
367          * it did attach a device, but did not create a port.
368          */
369         if (current == rte_eth_dev_count()) {
370                 ret = -1;
371                 goto err;
372         }
373
374         *port_id = eth_dev_last_created_port;
375         ret = 0;
376
377 err:
378         free(name);
379         free(args);
380         return ret;
381 }
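
/*
 * Hot-plug usage sketch (hypothetical devargs, shown only as an example;
 * error handling omitted):
 *
 *	uint16_t pid;
 *
 *	if (rte_eth_dev_attach("net_null0", &pid) == 0)
 *		printf("attached port %d\n", pid);
 */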
382
383 /* detach the device, then store the name of the device */
384 int
385 rte_eth_dev_detach(uint16_t port_id, char *name)
386 {
387         uint32_t dev_flags;
388         int ret = -1;
389
390         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
391
392         if (name == NULL) {
393                 ret = -EINVAL;
394                 goto err;
395         }
396
397         dev_flags = rte_eth_devices[port_id].data->dev_flags;
398         if (dev_flags & RTE_ETH_DEV_BONDED_SLAVE) {
399                 RTE_LOG(ERR, EAL, "Port %" PRIu16 " is bonded, cannot detach\n",
400                         port_id);
401                 ret = -ENOTSUP;
402                 goto err;
403         }
404
405         snprintf(name, sizeof(rte_eth_devices[port_id].data->name),
406                  "%s", rte_eth_devices[port_id].data->name);
407
408         ret = rte_eal_dev_detach(rte_eth_devices[port_id].device);
409         if (ret < 0)
410                 goto err;
411
412         rte_eth_devices[port_id].state = RTE_ETH_DEV_UNUSED;
413         return 0;
414
415 err:
416         return ret;
417 }
418
419 static int
420 rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
421 {
422         uint16_t old_nb_queues = dev->data->nb_rx_queues;
423         void **rxq;
424         unsigned i;
425
426         if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
427                 dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
428                                 sizeof(dev->data->rx_queues[0]) * nb_queues,
429                                 RTE_CACHE_LINE_SIZE);
430                 if (dev->data->rx_queues == NULL) {
431                         dev->data->nb_rx_queues = 0;
432                         return -(ENOMEM);
433                 }
434         } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
435                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
436
437                 rxq = dev->data->rx_queues;
438
439                 for (i = nb_queues; i < old_nb_queues; i++)
440                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
441                 rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
442                                 RTE_CACHE_LINE_SIZE);
443                 if (rxq == NULL)
444                         return -(ENOMEM);
445                 if (nb_queues > old_nb_queues) {
446                         uint16_t new_qs = nb_queues - old_nb_queues;
447
448                         memset(rxq + old_nb_queues, 0,
449                                 sizeof(rxq[0]) * new_qs);
450                 }
451
452                 dev->data->rx_queues = rxq;
453
454         } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
455                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
456
457                 rxq = dev->data->rx_queues;
458
459                 for (i = nb_queues; i < old_nb_queues; i++)
460                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
461
462                 rte_free(dev->data->rx_queues);
463                 dev->data->rx_queues = NULL;
464         }
465         dev->data->nb_rx_queues = nb_queues;
466         return 0;
467 }
468
469 int
470 rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
471 {
472         struct rte_eth_dev *dev;
473
474         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
475
476         dev = &rte_eth_devices[port_id];
477         if (rx_queue_id >= dev->data->nb_rx_queues) {
478                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
479                 return -EINVAL;
480         }
481
482         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
483
484         if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
485                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
486                         " already started\n",
487                         rx_queue_id, port_id);
488                 return 0;
489         }
490
491         return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
492
493 }
494
495 int
496 rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
497 {
498         struct rte_eth_dev *dev;
499
500         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
501
502         dev = &rte_eth_devices[port_id];
503         if (rx_queue_id >= dev->data->nb_rx_queues) {
504                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
505                 return -EINVAL;
506         }
507
508         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
509
510         if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
511                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
512                         " already stopped\n",
513                         rx_queue_id, port_id);
514                 return 0;
515         }
516
517         return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
518
519 }
520
521 int
522 rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
523 {
524         struct rte_eth_dev *dev;
525
526         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
527
528         dev = &rte_eth_devices[port_id];
529         if (tx_queue_id >= dev->data->nb_tx_queues) {
530                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
531                 return -EINVAL;
532         }
533
534         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
535
536         if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
537                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
538                         " already started\n",
539                         tx_queue_id, port_id);
540                 return 0;
541         }
542
543         return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
544
545 }
546
547 int
548 rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
549 {
550         struct rte_eth_dev *dev;
551
552         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
553
554         dev = &rte_eth_devices[port_id];
555         if (tx_queue_id >= dev->data->nb_tx_queues) {
556                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
557                 return -EINVAL;
558         }
559
560         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
561
562         if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
563                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
564                         " already stopped\n",
565                         tx_queue_id, port_id);
566                 return 0;
567         }
568
569         return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
570
571 }
572
573 static int
574 rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
575 {
576         uint16_t old_nb_queues = dev->data->nb_tx_queues;
577         void **txq;
578         unsigned i;
579
580         if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
581                 dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
582                                                    sizeof(dev->data->tx_queues[0]) * nb_queues,
583                                                    RTE_CACHE_LINE_SIZE);
584                 if (dev->data->tx_queues == NULL) {
585                         dev->data->nb_tx_queues = 0;
586                         return -(ENOMEM);
587                 }
588         } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
589                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
590
591                 txq = dev->data->tx_queues;
592
593                 for (i = nb_queues; i < old_nb_queues; i++)
594                         (*dev->dev_ops->tx_queue_release)(txq[i]);
595                 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
596                                   RTE_CACHE_LINE_SIZE);
597                 if (txq == NULL)
598                         return -ENOMEM;
599                 if (nb_queues > old_nb_queues) {
600                         uint16_t new_qs = nb_queues - old_nb_queues;
601
602                         memset(txq + old_nb_queues, 0,
603                                sizeof(txq[0]) * new_qs);
604                 }
605
606                 dev->data->tx_queues = txq;
607
608         } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
609                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
610
611                 txq = dev->data->tx_queues;
612
613                 for (i = nb_queues; i < old_nb_queues; i++)
614                         (*dev->dev_ops->tx_queue_release)(txq[i]);
615
616                 rte_free(dev->data->tx_queues);
617                 dev->data->tx_queues = NULL;
618         }
619         dev->data->nb_tx_queues = nb_queues;
620         return 0;
621 }
622
623 uint32_t
624 rte_eth_speed_bitflag(uint32_t speed, int duplex)
625 {
626         switch (speed) {
627         case ETH_SPEED_NUM_10M:
628                 return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
629         case ETH_SPEED_NUM_100M:
630                 return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
631         case ETH_SPEED_NUM_1G:
632                 return ETH_LINK_SPEED_1G;
633         case ETH_SPEED_NUM_2_5G:
634                 return ETH_LINK_SPEED_2_5G;
635         case ETH_SPEED_NUM_5G:
636                 return ETH_LINK_SPEED_5G;
637         case ETH_SPEED_NUM_10G:
638                 return ETH_LINK_SPEED_10G;
639         case ETH_SPEED_NUM_20G:
640                 return ETH_LINK_SPEED_20G;
641         case ETH_SPEED_NUM_25G:
642                 return ETH_LINK_SPEED_25G;
643         case ETH_SPEED_NUM_40G:
644                 return ETH_LINK_SPEED_40G;
645         case ETH_SPEED_NUM_50G:
646                 return ETH_LINK_SPEED_50G;
647         case ETH_SPEED_NUM_56G:
648                 return ETH_LINK_SPEED_56G;
649         case ETH_SPEED_NUM_100G:
650                 return ETH_LINK_SPEED_100G;
651         default:
652                 return 0;
653         }
654 }
655
656 /**
657  * Convert the legacy rxmode bitfield configuration into Rx offload flags.
658  */
659 static void
660 rte_eth_convert_rx_offload_bitfield(const struct rte_eth_rxmode *rxmode,
661                                     uint64_t *rx_offloads)
662 {
663         uint64_t offloads = 0;
664
665         if (rxmode->header_split == 1)
666                 offloads |= DEV_RX_OFFLOAD_HEADER_SPLIT;
667         if (rxmode->hw_ip_checksum == 1)
668                 offloads |= DEV_RX_OFFLOAD_CHECKSUM;
669         if (rxmode->hw_vlan_filter == 1)
670                 offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
671         if (rxmode->hw_vlan_strip == 1)
672                 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
673         if (rxmode->hw_vlan_extend == 1)
674                 offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
675         if (rxmode->jumbo_frame == 1)
676                 offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
677         if (rxmode->hw_strip_crc == 1)
678                 offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
679         if (rxmode->enable_scatter == 1)
680                 offloads |= DEV_RX_OFFLOAD_SCATTER;
681         if (rxmode->enable_lro == 1)
682                 offloads |= DEV_RX_OFFLOAD_TCP_LRO;
683         if (rxmode->hw_timestamp == 1)
684                 offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
685         if (rxmode->security == 1)
686                 offloads |= DEV_RX_OFFLOAD_SECURITY;
687
688         *rx_offloads = offloads;
689 }
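
/*
 * Minimal sketch of the mapping above (illustration only):
 *
 *	struct rte_eth_rxmode mode = { .hw_ip_checksum = 1, .hw_vlan_strip = 1 };
 *	uint64_t offl;
 *
 *	rte_eth_convert_rx_offload_bitfield(&mode, &offl);
 *	after the call, offl == (DEV_RX_OFFLOAD_CHECKSUM | DEV_RX_OFFLOAD_VLAN_STRIP)
 */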
690
691 /**
692  * Convert Rx offload flags back into the legacy rxmode bitfield configuration.
693  */
694 static void
695 rte_eth_convert_rx_offloads(const uint64_t rx_offloads,
696                             struct rte_eth_rxmode *rxmode)
697 {
698
699         if (rx_offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
700                 rxmode->header_split = 1;
701         else
702                 rxmode->header_split = 0;
703         if (rx_offloads & DEV_RX_OFFLOAD_CHECKSUM)
704                 rxmode->hw_ip_checksum = 1;
705         else
706                 rxmode->hw_ip_checksum = 0;
707         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
708                 rxmode->hw_vlan_filter = 1;
709         else
710                 rxmode->hw_vlan_filter = 0;
711         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
712                 rxmode->hw_vlan_strip = 1;
713         else
714                 rxmode->hw_vlan_strip = 0;
715         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
716                 rxmode->hw_vlan_extend = 1;
717         else
718                 rxmode->hw_vlan_extend = 0;
719         if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
720                 rxmode->jumbo_frame = 1;
721         else
722                 rxmode->jumbo_frame = 0;
723         if (rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP)
724                 rxmode->hw_strip_crc = 1;
725         else
726                 rxmode->hw_strip_crc = 0;
727         if (rx_offloads & DEV_RX_OFFLOAD_SCATTER)
728                 rxmode->enable_scatter = 1;
729         else
730                 rxmode->enable_scatter = 0;
731         if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
732                 rxmode->enable_lro = 1;
733         else
734                 rxmode->enable_lro = 0;
735         if (rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
736                 rxmode->hw_timestamp = 1;
737         else
738                 rxmode->hw_timestamp = 0;
739         if (rx_offloads & DEV_RX_OFFLOAD_SECURITY)
740                 rxmode->security = 1;
741         else
742                 rxmode->security = 0;
743 }
744
745 int
746 rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
747                       const struct rte_eth_conf *dev_conf)
748 {
749         struct rte_eth_dev *dev;
750         struct rte_eth_dev_info dev_info;
751         struct rte_eth_conf local_conf = *dev_conf;
752         int diag;
753
754         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
755
756         if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
757                 RTE_PMD_DEBUG_TRACE(
758                         "Number of RX queues requested (%u) is greater than max supported (%d)\n",
759                         nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
760                 return -EINVAL;
761         }
762
763         if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
764                 RTE_PMD_DEBUG_TRACE(
765                         "Number of TX queues requested (%u) is greater than max supported (%d)\n",
766                         nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
767                 return -EINVAL;
768         }
769
770         dev = &rte_eth_devices[port_id];
771
772         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
773         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
774
775         if (dev->data->dev_started) {
776                 RTE_PMD_DEBUG_TRACE(
777                     "port %d must be stopped to allow configuration\n", port_id);
778                 return -EBUSY;
779         }
780
781         /*
782          * Convert between the two offloads APIs so that PMDs need to
783          * support only one of them.
784          */
785         if (dev_conf->rxmode.ignore_offload_bitfield == 0) {
786                 rte_eth_convert_rx_offload_bitfield(
787                                 &dev_conf->rxmode, &local_conf.rxmode.offloads);
788         } else {
789                 rte_eth_convert_rx_offloads(dev_conf->rxmode.offloads,
790                                             &local_conf.rxmode);
791         }
792
793         /* Copy the dev_conf parameter into the dev structure */
794         memcpy(&dev->data->dev_conf, &local_conf, sizeof(dev->data->dev_conf));
795
796         /*
797          * Check that the numbers of RX and TX queues are not greater
798          * than the maximum number of RX and TX queues supported by the
799          * configured device.
800          */
801         (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
802
803         if (nb_rx_q == 0 && nb_tx_q == 0) {
804                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d: number of RX and TX queues cannot both be 0\n", port_id);
805                 return -EINVAL;
806         }
807
808         if (nb_rx_q > dev_info.max_rx_queues) {
809                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
810                                 port_id, nb_rx_q, dev_info.max_rx_queues);
811                 return -EINVAL;
812         }
813
814         if (nb_tx_q > dev_info.max_tx_queues) {
815                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
816                                 port_id, nb_tx_q, dev_info.max_tx_queues);
817                 return -EINVAL;
818         }
819
820         /* Check that the device supports requested interrupts */
821         if ((dev_conf->intr_conf.lsc == 1) &&
822                 (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
823                         RTE_PMD_DEBUG_TRACE("driver %s does not support lsc\n",
824                                         dev->device->driver->name);
825                         return -EINVAL;
826         }
827         if ((dev_conf->intr_conf.rmv == 1) &&
828             (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
829                 RTE_PMD_DEBUG_TRACE("driver %s does not support rmv\n",
830                                     dev->device->driver->name);
831                 return -EINVAL;
832         }
833
834         /*
835          * If jumbo frames are enabled, check that the maximum RX packet
836          * length is supported by the configured device.
837          */
838         if (local_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
839                 if (dev_conf->rxmode.max_rx_pkt_len >
840                     dev_info.max_rx_pktlen) {
841                         RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
842                                 " > max valid value %u\n",
843                                 port_id,
844                                 (unsigned)dev_conf->rxmode.max_rx_pkt_len,
845                                 (unsigned)dev_info.max_rx_pktlen);
846                         return -EINVAL;
847                 } else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
848                         RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
849                                 " < min valid value %u\n",
850                                 port_id,
851                                 (unsigned)dev_conf->rxmode.max_rx_pkt_len,
852                                 (unsigned)ETHER_MIN_LEN);
853                         return -EINVAL;
854                 }
855         } else {
856                 if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
857                         dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
858                         /* Use default value */
859                         dev->data->dev_conf.rxmode.max_rx_pkt_len =
860                                                         ETHER_MAX_LEN;
861         }
862
863         /*
864          * Setup new number of RX/TX queues and reconfigure device.
865          */
866         diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
867         if (diag != 0) {
868                 RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
869                                 port_id, diag);
870                 return diag;
871         }
872
873         diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
874         if (diag != 0) {
875                 RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
876                                 port_id, diag);
877                 rte_eth_dev_rx_queue_config(dev, 0);
878                 return diag;
879         }
880
881         diag = (*dev->dev_ops->dev_configure)(dev);
882         if (diag != 0) {
883                 RTE_PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
884                                 port_id, diag);
885                 rte_eth_dev_rx_queue_config(dev, 0);
886                 rte_eth_dev_tx_queue_config(dev, 0);
887                 return diag;
888         }
889
890         /* Initialize Rx profiling if enabled at compilation time. */
891         diag = __rte_eth_profile_rx_init(port_id, dev);
892         if (diag != 0) {
893                 RTE_PMD_DEBUG_TRACE("port%d __rte_eth_profile_rx_init = %d\n",
894                                 port_id, diag);
895                 rte_eth_dev_rx_queue_config(dev, 0);
896                 rte_eth_dev_tx_queue_config(dev, 0);
897                 return diag;
898         }
899
900         return 0;
901 }
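
/*
 * Typical setup sequence built on the function above (a sketch; "mbuf_pool"
 * is assumed to be an existing mempool and error handling is omitted):
 *
 *	struct rte_eth_conf conf = {
 *		.rxmode = { .ignore_offload_bitfield = 1 },
 *	};
 *
 *	rte_eth_dev_configure(port_id, 1, 1, &conf);
 *	rte_eth_rx_queue_setup(port_id, 0, 512,
 *			rte_eth_dev_socket_id(port_id), NULL, mbuf_pool);
 *	rte_eth_tx_queue_setup(port_id, 0, 512,
 *			rte_eth_dev_socket_id(port_id), NULL);
 *	rte_eth_dev_start(port_id);
 */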
902
903 void
904 _rte_eth_dev_reset(struct rte_eth_dev *dev)
905 {
906         if (dev->data->dev_started) {
907                 RTE_PMD_DEBUG_TRACE(
908                         "port %d must be stopped to allow reset\n",
909                         dev->data->port_id);
910                 return;
911         }
912
913         rte_eth_dev_rx_queue_config(dev, 0);
914         rte_eth_dev_tx_queue_config(dev, 0);
915
916         memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
917 }
918
919 static void
920 rte_eth_dev_config_restore(uint16_t port_id)
921 {
922         struct rte_eth_dev *dev;
923         struct rte_eth_dev_info dev_info;
924         struct ether_addr *addr;
925         uint16_t i;
926         uint32_t pool = 0;
927         uint64_t pool_mask;
928
929         dev = &rte_eth_devices[port_id];
930
931         rte_eth_dev_info_get(port_id, &dev_info);
932
933         /* replay MAC address configuration including default MAC */
934         addr = &dev->data->mac_addrs[0];
935         if (*dev->dev_ops->mac_addr_set != NULL)
936                 (*dev->dev_ops->mac_addr_set)(dev, addr);
937         else if (*dev->dev_ops->mac_addr_add != NULL)
938                 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);
939
940         if (*dev->dev_ops->mac_addr_add != NULL) {
941                 for (i = 1; i < dev_info.max_mac_addrs; i++) {
942                         addr = &dev->data->mac_addrs[i];
943
944                         /* skip zero address */
945                         if (is_zero_ether_addr(addr))
946                                 continue;
947
948                         pool = 0;
949                         pool_mask = dev->data->mac_pool_sel[i];
950
951                         do {
952                                 if (pool_mask & 1ULL)
953                                         (*dev->dev_ops->mac_addr_add)(dev,
954                                                 addr, i, pool);
955                                 pool_mask >>= 1;
956                                 pool++;
957                         } while (pool_mask);
958                 }
959         }
960
961         /* replay promiscuous configuration */
962         if (rte_eth_promiscuous_get(port_id) == 1)
963                 rte_eth_promiscuous_enable(port_id);
964         else if (rte_eth_promiscuous_get(port_id) == 0)
965                 rte_eth_promiscuous_disable(port_id);
966
967         /* replay all multicast configuration */
968         if (rte_eth_allmulticast_get(port_id) == 1)
969                 rte_eth_allmulticast_enable(port_id);
970         else if (rte_eth_allmulticast_get(port_id) == 0)
971                 rte_eth_allmulticast_disable(port_id);
972 }
973
974 int
975 rte_eth_dev_start(uint16_t port_id)
976 {
977         struct rte_eth_dev *dev;
978         int diag;
979
980         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
981
982         dev = &rte_eth_devices[port_id];
983
984         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
985
986         if (dev->data->dev_started != 0) {
987                 RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16
988                         " already started\n",
989                         port_id);
990                 return 0;
991         }
992
993         diag = (*dev->dev_ops->dev_start)(dev);
994         if (diag == 0)
995                 dev->data->dev_started = 1;
996         else
997                 return diag;
998
999         rte_eth_dev_config_restore(port_id);
1000
1001         if (dev->data->dev_conf.intr_conf.lsc == 0) {
1002                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
1003                 (*dev->dev_ops->link_update)(dev, 0);
1004         }
1005         return 0;
1006 }
1007
1008 void
1009 rte_eth_dev_stop(uint16_t port_id)
1010 {
1011         struct rte_eth_dev *dev;
1012
1013         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1014         dev = &rte_eth_devices[port_id];
1015
1016         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1017
1018         if (dev->data->dev_started == 0) {
1019                 RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16
1020                         " already stopped\n",
1021                         port_id);
1022                 return;
1023         }
1024
1025         dev->data->dev_started = 0;
1026         (*dev->dev_ops->dev_stop)(dev);
1027 }
1028
1029 int
1030 rte_eth_dev_set_link_up(uint16_t port_id)
1031 {
1032         struct rte_eth_dev *dev;
1033
1034         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1035
1036         dev = &rte_eth_devices[port_id];
1037
1038         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1039         return (*dev->dev_ops->dev_set_link_up)(dev);
1040 }
1041
1042 int
1043 rte_eth_dev_set_link_down(uint16_t port_id)
1044 {
1045         struct rte_eth_dev *dev;
1046
1047         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1048
1049         dev = &rte_eth_devices[port_id];
1050
1051         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1052         return (*dev->dev_ops->dev_set_link_down)(dev);
1053 }
1054
1055 void
1056 rte_eth_dev_close(uint16_t port_id)
1057 {
1058         struct rte_eth_dev *dev;
1059
1060         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1061         dev = &rte_eth_devices[port_id];
1062
1063         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
1064         dev->data->dev_started = 0;
1065         (*dev->dev_ops->dev_close)(dev);
1066
1067         dev->data->nb_rx_queues = 0;
1068         rte_free(dev->data->rx_queues);
1069         dev->data->rx_queues = NULL;
1070         dev->data->nb_tx_queues = 0;
1071         rte_free(dev->data->tx_queues);
1072         dev->data->tx_queues = NULL;
1073 }
1074
1075 int
1076 rte_eth_dev_reset(uint16_t port_id)
1077 {
1078         struct rte_eth_dev *dev;
1079         int ret;
1080
1081         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1082         dev = &rte_eth_devices[port_id];
1083
1084         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
1085
1086         rte_eth_dev_stop(port_id);
1087         ret = dev->dev_ops->dev_reset(dev);
1088
1089         return ret;
1090 }
1091
1092 int
1093 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1094                        uint16_t nb_rx_desc, unsigned int socket_id,
1095                        const struct rte_eth_rxconf *rx_conf,
1096                        struct rte_mempool *mp)
1097 {
1098         int ret;
1099         uint32_t mbp_buf_size;
1100         struct rte_eth_dev *dev;
1101         struct rte_eth_dev_info dev_info;
1102         struct rte_eth_rxconf local_conf;
1103         void **rxq;
1104
1105         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1106
1107         dev = &rte_eth_devices[port_id];
1108         if (rx_queue_id >= dev->data->nb_rx_queues) {
1109                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
1110                 return -EINVAL;
1111         }
1112
1113         if (dev->data->dev_started) {
1114                 RTE_PMD_DEBUG_TRACE(
1115                     "port %d must be stopped to allow configuration\n", port_id);
1116                 return -EBUSY;
1117         }
1118
1119         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1120         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
1121
1122         /*
1123          * Check the size of the mbuf data buffer.
1124          * This value must be provided in the private data of the memory pool.
1125          * First check that the memory pool has valid private data.
1126          */
1127         rte_eth_dev_info_get(port_id, &dev_info);
1128         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
1129                 RTE_PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
1130                                 mp->name, (int) mp->private_data_size,
1131                                 (int) sizeof(struct rte_pktmbuf_pool_private));
1132                 return -ENOSPC;
1133         }
1134         mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1135
1136         if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
1137                 RTE_PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
1138                                 "(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
1139                                 "=%d)\n",
1140                                 mp->name,
1141                                 (int)mbp_buf_size,
1142                                 (int)(RTE_PKTMBUF_HEADROOM +
1143                                       dev_info.min_rx_bufsize),
1144                                 (int)RTE_PKTMBUF_HEADROOM,
1145                                 (int)dev_info.min_rx_bufsize);
1146                 return -EINVAL;
1147         }
1148
1149         if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
1150                         nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
1151                         nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
1152
1153                 RTE_PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
1154                         "should be: <= %hu, >= %hu, and a multiple of %hu\n",
1155                         nb_rx_desc,
1156                         dev_info.rx_desc_lim.nb_max,
1157                         dev_info.rx_desc_lim.nb_min,
1158                         dev_info.rx_desc_lim.nb_align);
1159                 return -EINVAL;
1160         }
1161
1162         rxq = dev->data->rx_queues;
1163         if (rxq[rx_queue_id]) {
1164                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
1165                                         -ENOTSUP);
1166                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
1167                 rxq[rx_queue_id] = NULL;
1168         }
1169
1170         if (rx_conf == NULL)
1171                 rx_conf = &dev_info.default_rxconf;
1172
1173         local_conf = *rx_conf;
1174         if (dev->data->dev_conf.rxmode.ignore_offload_bitfield == 0) {
1175                 /**
1176                  * Reflect port offloads to queue offloads so that the
1177                  * port-level offloads are not discarded.
1178                  */
1179                 rte_eth_convert_rx_offload_bitfield(&dev->data->dev_conf.rxmode,
1180                                                     &local_conf.offloads);
1181         }
1182
1183         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
1184                                               socket_id, &local_conf, mp);
1185         if (!ret) {
1186                 if (!dev->data->min_rx_buf_size ||
1187                     dev->data->min_rx_buf_size > mbp_buf_size)
1188                         dev->data->min_rx_buf_size = mbp_buf_size;
1189         }
1190
1191         return ret;
1192 }
1193
1194 /**
1195  * Convert legacy txq_flags into Tx offload flags.
1196  */
1197 static void
1198 rte_eth_convert_txq_flags(const uint32_t txq_flags, uint64_t *tx_offloads)
1199 {
1200         uint64_t offloads = 0;
1201
1202         if (!(txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS))
1203                 offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
1204         if (!(txq_flags & ETH_TXQ_FLAGS_NOVLANOFFL))
1205                 offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
1206         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP))
1207                 offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM;
1208         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMUDP))
1209                 offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
1210         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMTCP))
1211                 offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
1212         if ((txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT) &&
1213             (txq_flags & ETH_TXQ_FLAGS_NOMULTMEMP))
1214                 offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1215
1216         *tx_offloads = offloads;
1217 }
1218
1219 /**
1220  * Convert Tx offload flags back into legacy txq_flags.
1221  */
1222 static void
1223 rte_eth_convert_txq_offloads(const uint64_t tx_offloads, uint32_t *txq_flags)
1224 {
1225         uint32_t flags = 0;
1226
1227         if (!(tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS))
1228                 flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
1229         if (!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT))
1230                 flags |= ETH_TXQ_FLAGS_NOVLANOFFL;
1231         if (!(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
1232                 flags |= ETH_TXQ_FLAGS_NOXSUMSCTP;
1233         if (!(tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM))
1234                 flags |= ETH_TXQ_FLAGS_NOXSUMUDP;
1235         if (!(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM))
1236                 flags |= ETH_TXQ_FLAGS_NOXSUMTCP;
1237         if (tx_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
1238                 flags |= (ETH_TXQ_FLAGS_NOREFCOUNT | ETH_TXQ_FLAGS_NOMULTMEMP);
1239
1240         *txq_flags = flags;
1241 }
1242
1243 int
1244 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
1245                        uint16_t nb_tx_desc, unsigned int socket_id,
1246                        const struct rte_eth_txconf *tx_conf)
1247 {
1248         struct rte_eth_dev *dev;
1249         struct rte_eth_dev_info dev_info;
1250         struct rte_eth_txconf local_conf;
1251         void **txq;
1252
1253         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1254
1255         dev = &rte_eth_devices[port_id];
1256         if (tx_queue_id >= dev->data->nb_tx_queues) {
1257                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
1258                 return -EINVAL;
1259         }
1260
1261         if (dev->data->dev_started) {
1262                 RTE_PMD_DEBUG_TRACE(
1263                     "port %d must be stopped to allow configuration\n", port_id);
1264                 return -EBUSY;
1265         }
1266
1267         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1268         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
1269
1270         rte_eth_dev_info_get(port_id, &dev_info);
1271
1272         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
1273             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
1274             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
1275                 RTE_PMD_DEBUG_TRACE("Invalid value for nb_tx_desc(=%hu), "
1276                                 "should be: <= %hu, >= %hu, and a multiple of %hu\n",
1277                                 nb_tx_desc,
1278                                 dev_info.tx_desc_lim.nb_max,
1279                                 dev_info.tx_desc_lim.nb_min,
1280                                 dev_info.tx_desc_lim.nb_align);
1281                 return -EINVAL;
1282         }
1283
1284         txq = dev->data->tx_queues;
1285         if (txq[tx_queue_id]) {
1286                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
1287                                         -ENOTSUP);
1288                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
1289                 txq[tx_queue_id] = NULL;
1290         }
1291
1292         if (tx_conf == NULL)
1293                 tx_conf = &dev_info.default_txconf;
1294
1295         /*
1296          * Convert between the two offloads APIs so that PMDs need to
1297          * support only one of them.
1298          */
1299         local_conf = *tx_conf;
1300         if (tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) {
1301                 rte_eth_convert_txq_offloads(tx_conf->offloads,
1302                                              &local_conf.txq_flags);
1303                 /* Keep the ignore flag. */
1304                 local_conf.txq_flags |= ETH_TXQ_FLAGS_IGNORE;
1305         } else {
1306                 rte_eth_convert_txq_flags(tx_conf->txq_flags,
1307                                           &local_conf.offloads);
1308         }
1309
1310         return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
1311                                                socket_id, &local_conf);
1312 }
1313
1314 void
1315 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
1316                 void *userdata __rte_unused)
1317 {
1318         unsigned i;
1319
1320         for (i = 0; i < unsent; i++)
1321                 rte_pktmbuf_free(pkts[i]);
1322 }
1323
1324 void
1325 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
1326                 void *userdata)
1327 {
1328         uint64_t *count = userdata;
1329         unsigned i;
1330
1331         for (i = 0; i < unsent; i++)
1332                 rte_pktmbuf_free(pkts[i]);
1333
1334         *count += unsent;
1335 }
1336
1337 int
1338 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
1339                 buffer_tx_error_fn cbfn, void *userdata)
1340 {
1341         buffer->error_callback = cbfn;
1342         buffer->error_userdata = userdata;
1343         return 0;
1344 }
1345
1346 int
1347 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
1348 {
1349         int ret = 0;
1350
1351         if (buffer == NULL)
1352                 return -EINVAL;
1353
1354         buffer->size = size;
1355         if (buffer->error_callback == NULL) {
1356                 ret = rte_eth_tx_buffer_set_err_callback(
1357                         buffer, rte_eth_tx_buffer_drop_callback, NULL);
1358         }
1359
1360         return ret;
1361 }
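
/*
 * Usage sketch for the TX buffering helpers above (illustration only; the
 * buffer size of 32 and the "drops" counter are arbitrary):
 *
 *	static uint64_t drops;
 *	struct rte_eth_dev_tx_buffer *buf;
 *
 *	buf = rte_zmalloc_socket("tx_buffer", RTE_ETH_TX_BUFFER_SIZE(32), 0,
 *			rte_eth_dev_socket_id(port_id));
 *	rte_eth_tx_buffer_init(buf, 32);
 *	rte_eth_tx_buffer_set_err_callback(buf,
 *			rte_eth_tx_buffer_count_callback, &drops);
 */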
1362
1363 int
1364 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
1365 {
1366         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1367
1368         /* Validate Input Data. Bail if not valid or not supported. */
1369         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1370         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
1371
1372         /* Call driver to free pending mbufs. */
1373         return (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
1374                         free_cnt);
1375 }
1376
1377 void
1378 rte_eth_promiscuous_enable(uint16_t port_id)
1379 {
1380         struct rte_eth_dev *dev;
1381
1382         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1383         dev = &rte_eth_devices[port_id];
1384
1385         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
1386         (*dev->dev_ops->promiscuous_enable)(dev);
1387         dev->data->promiscuous = 1;
1388 }
1389
1390 void
1391 rte_eth_promiscuous_disable(uint16_t port_id)
1392 {
1393         struct rte_eth_dev *dev;
1394
1395         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1396         dev = &rte_eth_devices[port_id];
1397
1398         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
1399         dev->data->promiscuous = 0;
1400         (*dev->dev_ops->promiscuous_disable)(dev);
1401 }
1402
1403 int
1404 rte_eth_promiscuous_get(uint16_t port_id)
1405 {
1406         struct rte_eth_dev *dev;
1407
1408         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1409
1410         dev = &rte_eth_devices[port_id];
1411         return dev->data->promiscuous;
1412 }
1413
1414 void
1415 rte_eth_allmulticast_enable(uint16_t port_id)
1416 {
1417         struct rte_eth_dev *dev;
1418
1419         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1420         dev = &rte_eth_devices[port_id];
1421
1422         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
1423         (*dev->dev_ops->allmulticast_enable)(dev);
1424         dev->data->all_multicast = 1;
1425 }
1426
1427 void
1428 rte_eth_allmulticast_disable(uint16_t port_id)
1429 {
1430         struct rte_eth_dev *dev;
1431
1432         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1433         dev = &rte_eth_devices[port_id];
1434
1435         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
1436         dev->data->all_multicast = 0;
1437         (*dev->dev_ops->allmulticast_disable)(dev);
1438 }
1439
1440 int
1441 rte_eth_allmulticast_get(uint16_t port_id)
1442 {
1443         struct rte_eth_dev *dev;
1444
1445         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1446
1447         dev = &rte_eth_devices[port_id];
1448         return dev->data->all_multicast;
1449 }
1450
1451 static inline int
1452 rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
1453                                 struct rte_eth_link *link)
1454 {
1455         struct rte_eth_link *dst = link;
1456         struct rte_eth_link *src = &(dev->data->dev_link);
1457
1458         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
1459                                         *(uint64_t *)src) == 0)
1460                 return -1;
1461
1462         return 0;
1463 }
1464
1465 void
1466 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
1467 {
1468         struct rte_eth_dev *dev;
1469
1470         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1471         dev = &rte_eth_devices[port_id];
1472
1473         if (dev->data->dev_conf.intr_conf.lsc != 0)
1474                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1475         else {
1476                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1477                 (*dev->dev_ops->link_update)(dev, 1);
1478                 *eth_link = dev->data->dev_link;
1479         }
1480 }
1481
1482 void
1483 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
1484 {
1485         struct rte_eth_dev *dev;
1486
1487         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1488         dev = &rte_eth_devices[port_id];
1489
1490         if (dev->data->dev_conf.intr_conf.lsc != 0)
1491                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1492         else {
1493                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1494                 (*dev->dev_ops->link_update)(dev, 0);
1495                 *eth_link = dev->data->dev_link;
1496         }
1497 }
1498
1499 int
1500 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
1501 {
1502         struct rte_eth_dev *dev;
1503
1504         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1505
1506         dev = &rte_eth_devices[port_id];
1507         memset(stats, 0, sizeof(*stats));
1508
1509         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1510         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1511         return (*dev->dev_ops->stats_get)(dev, stats);
1512 }
1513
1514 int
1515 rte_eth_stats_reset(uint16_t port_id)
1516 {
1517         struct rte_eth_dev *dev;
1518
1519         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1520         dev = &rte_eth_devices[port_id];
1521
1522         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
1523         (*dev->dev_ops->stats_reset)(dev);
1524         dev->data->rx_mbuf_alloc_failed = 0;
1525
1526         return 0;
1527 }
1528
1529 static inline int
1530 get_xstats_basic_count(struct rte_eth_dev *dev)
1531 {
1532         uint16_t nb_rxqs, nb_txqs;
1533         int count;
1534
1535         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1536         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1537
1538         count = RTE_NB_STATS;
1539         count += nb_rxqs * RTE_NB_RXQ_STATS;
1540         count += nb_txqs * RTE_NB_TXQ_STATS;
1541
1542         return count;
1543 }
1544
1545 static int
1546 get_xstats_count(uint16_t port_id)
1547 {
1548         struct rte_eth_dev *dev;
1549         int count;
1550
1551         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1552         dev = &rte_eth_devices[port_id];
1553         if (dev->dev_ops->xstats_get_names_by_id != NULL) {
1554                 count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
1555                                 NULL, 0);
1556                 if (count < 0)
1557                         return count;
1558         }
1559         if (dev->dev_ops->xstats_get_names != NULL) {
1560                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
1561                 if (count < 0)
1562                         return count;
1563         } else
1564                 count = 0;
1565
1566
1567         count += get_xstats_basic_count(dev);
1568
1569         return count;
1570 }
1571
1572 int
1573 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
1574                 uint64_t *id)
1575 {
1576         int cnt_xstats, idx_xstat;
1577
1578         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1579
1580         if (!id) {
1581                 RTE_PMD_DEBUG_TRACE("Error: id pointer is NULL\n");
1582                 return -ENOMEM;
1583         }
1584
1585         if (!xstat_name) {
1586                 RTE_PMD_DEBUG_TRACE("Error: xstat_name pointer is NULL\n");
1587                 return -ENOMEM;
1588         }
1589
1590         /* Get count */
1591         cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
1592         if (cnt_xstats < 0) {
1593                 RTE_PMD_DEBUG_TRACE("Error: Cannot get count of xstats\n");
1594                 return -ENODEV;
1595         }
1596
1597         /* Get id-name lookup table */
1598         struct rte_eth_xstat_name xstats_names[cnt_xstats];
1599
1600         if (cnt_xstats != rte_eth_xstats_get_names_by_id(
1601                         port_id, xstats_names, cnt_xstats, NULL)) {
1602                 RTE_PMD_DEBUG_TRACE("Error: Cannot get xstats lookup\n");
1603                 return -1;
1604         }
1605
1606         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
1607                 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
1608                         *id = idx_xstat;
1609                         return 0;
1610                 }
1611         }
1612
1613         return -EINVAL;
1614 }
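
/*
 * Usage sketch (illustrative): resolve one extended statistic by name, then
 * read its current value with rte_eth_xstats_get_by_id().  "rx_good_packets"
 * is one of the generic names registered in rte_stats_strings[].
 *
 *     uint64_t id, value;
 *
 *     if (rte_eth_xstats_get_id_by_name(port_id, "rx_good_packets", &id) == 0 &&
 *         rte_eth_xstats_get_by_id(port_id, &id, &value, 1) == 1)
 *             printf("rx_good_packets=%" PRIu64 "\n", value);
 */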
1615
1616 /* retrieve basic stats names */
1617 static int
1618 rte_eth_basic_stats_get_names(struct rte_eth_dev *dev,
1619         struct rte_eth_xstat_name *xstats_names)
1620 {
1621         int cnt_used_entries = 0;
1622         uint32_t idx, id_queue;
1623         uint16_t num_q;
1624
1625         for (idx = 0; idx < RTE_NB_STATS; idx++) {
1626                 snprintf(xstats_names[cnt_used_entries].name,
1627                         sizeof(xstats_names[0].name),
1628                         "%s", rte_stats_strings[idx].name);
1629                 cnt_used_entries++;
1630         }
1631         num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1632         for (id_queue = 0; id_queue < num_q; id_queue++) {
1633                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
1634                         snprintf(xstats_names[cnt_used_entries].name,
1635                                 sizeof(xstats_names[0].name),
1636                                 "rx_q%u%s",
1637                                 id_queue, rte_rxq_stats_strings[idx].name);
1638                         cnt_used_entries++;
1639                 }
1640
1641         }
1642         num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1643         for (id_queue = 0; id_queue < num_q; id_queue++) {
1644                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
1645                         snprintf(xstats_names[cnt_used_entries].name,
1646                                 sizeof(xstats_names[0].name),
1647                                 "tx_q%u%s",
1648                                 id_queue, rte_txq_stats_strings[idx].name);
1649                         cnt_used_entries++;
1650                 }
1651         }
1652         return cnt_used_entries;
1653 }
1654
1655 /* retrieve ethdev extended statistics names */
1656 int
1657 rte_eth_xstats_get_names_by_id(uint16_t port_id,
1658         struct rte_eth_xstat_name *xstats_names, unsigned int size,
1659         uint64_t *ids)
1660 {
1661         struct rte_eth_xstat_name *xstats_names_copy;
1662         unsigned int no_basic_stat_requested = 1;
1663         unsigned int expected_entries;
1664         struct rte_eth_dev *dev;
1665         unsigned int i;
1666         int ret;
1667
1668         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1669         dev = &rte_eth_devices[port_id];
1670
1671         ret = get_xstats_count(port_id);
1672         if (ret < 0)
1673                 return ret;
1674         expected_entries = (unsigned int)ret;
1675
1676         /* Return max number of stats if no ids given */
1677         if (!ids) {
1678                 if (!xstats_names)
1679                         return expected_entries;
1680                 else if (xstats_names && size < expected_entries)
1681                         return expected_entries;
1682         }
1683
1684         if (ids && !xstats_names)
1685                 return -EINVAL;
1686
1687         if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
1688                 unsigned int basic_count = get_xstats_basic_count(dev);
1689                 uint64_t ids_copy[size];
1690
1691                 for (i = 0; i < size; i++) {
1692                         if (ids[i] < basic_count) {
1693                                 no_basic_stat_requested = 0;
1694                                 break;
1695                         }
1696
1697                         /*
1698                          * Convert ids to xstats ids that PMD knows.
1699                          * ids known by user are basic + extended stats.
1700                          */
1701                         ids_copy[i] = ids[i] - basic_count;
1702                 }
1703
1704                 if (no_basic_stat_requested)
1705                         return (*dev->dev_ops->xstats_get_names_by_id)(dev,
1706                                         xstats_names, ids_copy, size);
1707         }
1708
1709         /* Retrieve all stats */
1710         if (!ids) {
1711                 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
1712                                 expected_entries);
1713                 if (num_stats < 0 || num_stats > (int)expected_entries)
1714                         return num_stats;
1715                 else
1716                         return expected_entries;
1717         }
1718
1719         xstats_names_copy = calloc(expected_entries,
1720                 sizeof(struct rte_eth_xstat_name));
1721
1722         if (!xstats_names_copy) {
1723                 RTE_PMD_DEBUG_TRACE("ERROR: can't allocate memory\n");
1724                 return -ENOMEM;
1725         }
1726
1727         /* Fill xstats_names_copy structure */
1728         rte_eth_xstats_get_names(port_id, xstats_names_copy, expected_entries);
1729
1730         /* Filter stats */
1731         for (i = 0; i < size; i++) {
1732                 if (ids[i] >= expected_entries) {
1733                         RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n");
1734                         free(xstats_names_copy);
1735                         return -1;
1736                 }
1737                 xstats_names[i] = xstats_names_copy[ids[i]];
1738         }
1739
1740         free(xstats_names_copy);
1741         return size;
1742 }
1743
1744 int
1745 rte_eth_xstats_get_names(uint16_t port_id,
1746         struct rte_eth_xstat_name *xstats_names,
1747         unsigned int size)
1748 {
1749         struct rte_eth_dev *dev;
1750         int cnt_used_entries;
1751         int cnt_expected_entries;
1752         int cnt_driver_entries;
1753
1754         cnt_expected_entries = get_xstats_count(port_id);
1755         if (xstats_names == NULL || cnt_expected_entries < 0 ||
1756                         (int)size < cnt_expected_entries)
1757                 return cnt_expected_entries;
1758
1759         /* port_id checked in get_xstats_count() */
1760         dev = &rte_eth_devices[port_id];
1761
1762         cnt_used_entries = rte_eth_basic_stats_get_names(
1763                 dev, xstats_names);
1764
1765         if (dev->dev_ops->xstats_get_names != NULL) {
1766                 /* If there are any driver-specific xstats, append them
1767                  * to end of list.
1768                  */
1769                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
1770                         dev,
1771                         xstats_names + cnt_used_entries,
1772                         size - cnt_used_entries);
1773                 if (cnt_driver_entries < 0)
1774                         return cnt_driver_entries;
1775                 cnt_used_entries += cnt_driver_entries;
1776         }
1777
1778         return cnt_used_entries;
1779 }
1780
1781
1782 static int
1783 rte_eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
1784 {
1785         struct rte_eth_dev *dev;
1786         struct rte_eth_stats eth_stats;
1787         unsigned int count = 0, i, q;
1788         uint64_t val, *stats_ptr;
1789         uint16_t nb_rxqs, nb_txqs;
1790
1791         rte_eth_stats_get(port_id, &eth_stats);
1792         dev = &rte_eth_devices[port_id];
1793
1794         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1795         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1796
1797         /* global stats */
1798         for (i = 0; i < RTE_NB_STATS; i++) {
1799                 stats_ptr = RTE_PTR_ADD(&eth_stats,
1800                                         rte_stats_strings[i].offset);
1801                 val = *stats_ptr;
1802                 xstats[count++].value = val;
1803         }
1804
1805         /* per-rxq stats */
1806         for (q = 0; q < nb_rxqs; q++) {
1807                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
1808                         stats_ptr = RTE_PTR_ADD(&eth_stats,
1809                                         rte_rxq_stats_strings[i].offset +
1810                                         q * sizeof(uint64_t));
1811                         val = *stats_ptr;
1812                         xstats[count++].value = val;
1813                 }
1814         }
1815
1816         /* per-txq stats */
1817         for (q = 0; q < nb_txqs; q++) {
1818                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
1819                         stats_ptr = RTE_PTR_ADD(&eth_stats,
1820                                         rte_txq_stats_strings[i].offset +
1821                                         q * sizeof(uint64_t));
1822                         val = *stats_ptr;
1823                         xstats[count++].value = val;
1824                 }
1825         }
1826         return count;
1827 }
1828
1829 /* retrieve ethdev extended statistics */
1830 int
1831 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
1832                          uint64_t *values, unsigned int size)
1833 {
1834         unsigned int no_basic_stat_requested = 1;
1835         unsigned int num_xstats_filled;
1836         uint16_t expected_entries;
1837         struct rte_eth_dev *dev;
1838         unsigned int i;
1839         int ret;
1840
1841         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1842         expected_entries = get_xstats_count(port_id);
1843         struct rte_eth_xstat xstats[expected_entries];
1844         dev = &rte_eth_devices[port_id];
1845
1846         /* Return max number of stats if no ids given */
1847         if (!ids) {
1848                 if (!values)
1849                         return expected_entries;
1850                 else if (values && size < expected_entries)
1851                         return expected_entries;
1852         }
1853
1854         if (ids && !values)
1855                 return -EINVAL;
1856
1857         if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
1858                 unsigned int basic_count = get_xstats_basic_count(dev);
1859                 uint64_t ids_copy[size];
1860
1861                 for (i = 0; i < size; i++) {
1862                         if (ids[i] < basic_count) {
1863                                 no_basic_stat_requested = 0;
1864                                 break;
1865                         }
1866
1867                         /*
1868                          * Convert ids to xstats ids that PMD knows.
1869                          * ids known by user are basic + extended stats.
1870                          */
1871                         ids_copy[i] = ids[i] - basic_count;
1872                 }
1873
1874                 if (no_basic_stat_requested)
1875                         return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
1876                                         values, size);
1877         }
1878
1879         /* Fill the xstats structure */
1880         ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
1881         if (ret < 0)
1882                 return ret;
1883         num_xstats_filled = (unsigned int)ret;
1884
1885         /* Return all stats */
1886         if (!ids) {
1887                 for (i = 0; i < num_xstats_filled; i++)
1888                         values[i] = xstats[i].value;
1889                 return expected_entries;
1890         }
1891
1892         /* Filter stats */
1893         for (i = 0; i < size; i++) {
1894                 if (ids[i] >= expected_entries) {
1895                         RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n");
1896                         return -1;
1897                 }
1898                 values[i] = xstats[ids[i]].value;
1899         }
1900         return size;
1901 }
1902
1903 int
1904 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
1905         unsigned int n)
1906 {
1907         struct rte_eth_dev *dev;
1908         unsigned int count = 0, i;
1909         signed int xcount = 0;
1910         uint16_t nb_rxqs, nb_txqs;
1911
1912         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1913
1914         dev = &rte_eth_devices[port_id];
1915
1916         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1917         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1918
1919         /* Return generic statistics */
1920         count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
1921                 (nb_txqs * RTE_NB_TXQ_STATS);
1922
1923         /* implemented by the driver */
1924         if (dev->dev_ops->xstats_get != NULL) {
1925                 /* Retrieve the xstats from the driver at the end of the
1926                  * xstats struct.
1927                  */
1928                 xcount = (*dev->dev_ops->xstats_get)(dev,
1929                                      xstats ? xstats + count : NULL,
1930                                      (n > count) ? n - count : 0);
1931
1932                 if (xcount < 0)
1933                         return xcount;
1934         }
1935
1936         if (n < count + xcount || xstats == NULL)
1937                 return count + xcount;
1938
1939         /* now fill the xstats structure */
1940         count = rte_eth_basic_stats_get(port_id, xstats);
1941
1942         for (i = 0; i < count; i++)
1943                 xstats[i].id = i;
1944         /* add an offset to driver-specific stats */
1945         for ( ; i < count + xcount; i++)
1946                 xstats[i].id += count;
1947
1948         return count + xcount;
1949 }
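
/*
 * Usage sketch (illustrative): the usual two-call pattern for dumping every
 * extended statistic of a port.  Error handling and free() are omitted.
 *
 *     int i, n = rte_eth_xstats_get(port_id, NULL, 0);
 *     struct rte_eth_xstat *xs = malloc(n * sizeof(*xs));
 *     struct rte_eth_xstat_name *names = malloc(n * sizeof(*names));
 *
 *     rte_eth_xstats_get_names(port_id, names, n);
 *     rte_eth_xstats_get(port_id, xs, n);
 *     for (i = 0; i < n; i++)
 *             printf("%s: %" PRIu64 "\n", names[xs[i].id].name, xs[i].value);
 */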
1950
1951 /* reset ethdev extended statistics */
1952 void
1953 rte_eth_xstats_reset(uint16_t port_id)
1954 {
1955         struct rte_eth_dev *dev;
1956
1957         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1958         dev = &rte_eth_devices[port_id];
1959
1960         /* implemented by the driver */
1961         if (dev->dev_ops->xstats_reset != NULL) {
1962                 (*dev->dev_ops->xstats_reset)(dev);
1963                 return;
1964         }
1965
1966         /* fallback to default */
1967         rte_eth_stats_reset(port_id);
1968 }
1969
1970 static int
1971 set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx,
1972                 uint8_t is_rx)
1973 {
1974         struct rte_eth_dev *dev;
1975
1976         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1977
1978         dev = &rte_eth_devices[port_id];
1979
1980         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
1981         return (*dev->dev_ops->queue_stats_mapping_set)
1982                         (dev, queue_id, stat_idx, is_rx);
1983 }
1984
1985
1986 int
1987 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
1988                 uint8_t stat_idx)
1989 {
1990         return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
1991                         STAT_QMAP_TX);
1992 }
1993
1994
1995 int
1996 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
1997                 uint8_t stat_idx)
1998 {
1999         return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
2000                         STAT_QMAP_RX);
2001 }
2002
2003 int
2004 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
2005 {
2006         struct rte_eth_dev *dev;
2007
2008         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2009         dev = &rte_eth_devices[port_id];
2010
2011         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
2012         return (*dev->dev_ops->fw_version_get)(dev, fw_version, fw_size);
2013 }
2014
2015 void
2016 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
2017 {
2018         struct rte_eth_dev *dev;
2019         const struct rte_eth_desc_lim lim = {
2020                 .nb_max = UINT16_MAX,
2021                 .nb_min = 0,
2022                 .nb_align = 1,
2023         };
2024
2025         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2026         dev = &rte_eth_devices[port_id];
2027
2028         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
2029         dev_info->rx_desc_lim = lim;
2030         dev_info->tx_desc_lim = lim;
2031
2032         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
2033         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
2034         dev_info->driver_name = dev->device->driver->name;
2035         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
2036         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
2037 }
2038
2039 int
2040 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
2041                                  uint32_t *ptypes, int num)
2042 {
2043         int i, j;
2044         struct rte_eth_dev *dev;
2045         const uint32_t *all_ptypes;
2046
2047         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2048         dev = &rte_eth_devices[port_id];
2049         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
2050         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
2051
2052         if (!all_ptypes)
2053                 return 0;
2054
2055         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
2056                 if (all_ptypes[i] & ptype_mask) {
2057                         if (j < num)
2058                                 ptypes[j] = all_ptypes[i];
2059                         j++;
2060                 }
2061
2062         return j;
2063 }
2064
2065 void
2066 rte_eth_macaddr_get(uint16_t port_id, struct ether_addr *mac_addr)
2067 {
2068         struct rte_eth_dev *dev;
2069
2070         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2071         dev = &rte_eth_devices[port_id];
2072         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
2073 }
2074
2075
2076 int
2077 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
2078 {
2079         struct rte_eth_dev *dev;
2080
2081         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2082
2083         dev = &rte_eth_devices[port_id];
2084         *mtu = dev->data->mtu;
2085         return 0;
2086 }
2087
2088 int
2089 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
2090 {
2091         int ret;
2092         struct rte_eth_dev *dev;
2093
2094         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2095         dev = &rte_eth_devices[port_id];
2096         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
2097
2098         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
2099         if (!ret)
2100                 dev->data->mtu = mtu;
2101
2102         return ret;
2103 }
2104
2105 int
2106 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
2107 {
2108         struct rte_eth_dev *dev;
2109         int ret;
2110
2111         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2112         dev = &rte_eth_devices[port_id];
2113         if (!(dev->data->dev_conf.rxmode.offloads &
2114               DEV_RX_OFFLOAD_VLAN_FILTER)) {
2115                 RTE_PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
2116                 return -ENOSYS;
2117         }
2118
2119         if (vlan_id > 4095) {
2120                 RTE_PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
2121                                 port_id, (unsigned) vlan_id);
2122                 return -EINVAL;
2123         }
2124         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
2125
2126         ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
2127         if (ret == 0) {
2128                 struct rte_vlan_filter_conf *vfc;
2129                 int vidx;
2130                 int vbit;
2131
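                /*
                 * Mirror the new filter state in the software bitmap:
                 * vlan_filter_conf holds 4096 bits, 64 VLAN ids per
                 * uint64_t word.
                 */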
2132                 vfc = &dev->data->vlan_filter_conf;
2133                 vidx = vlan_id / 64;
2134                 vbit = vlan_id % 64;
2135
2136                 if (on)
2137                         vfc->ids[vidx] |= UINT64_C(1) << vbit;
2138                 else
2139                         vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
2140         }
2141
2142         return ret;
2143 }
2144
2145 int
2146 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
2147                                     int on)
2148 {
2149         struct rte_eth_dev *dev;
2150
2151         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2152         dev = &rte_eth_devices[port_id];
2153         if (rx_queue_id >= dev->data->nb_rx_queues) {
2154                 RTE_PMD_DEBUG_TRACE("Invalid rx_queue_id=%u\n", rx_queue_id);
2155                 return -EINVAL;
2156         }
2157
2158         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
2159         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
2160
2161         return 0;
2162 }
2163
2164 int
2165 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
2166                                 enum rte_vlan_type vlan_type,
2167                                 uint16_t tpid)
2168 {
2169         struct rte_eth_dev *dev;
2170
2171         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2172         dev = &rte_eth_devices[port_id];
2173         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
2174
2175         return (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, tpid);
2176 }
2177
2178 int
2179 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
2180 {
2181         struct rte_eth_dev *dev;
2182         int ret = 0;
2183         int mask = 0;
2184         int cur, org = 0;
2185         uint64_t orig_offloads;
2186
2187         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2188         dev = &rte_eth_devices[port_id];
2189
2190         /* save original values in case of failure */
2191         orig_offloads = dev->data->dev_conf.rxmode.offloads;
2192
2193         /* check which options were changed by the application */
2194         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
2195         org = !!(dev->data->dev_conf.rxmode.offloads &
2196                  DEV_RX_OFFLOAD_VLAN_STRIP);
2197         if (cur != org) {
2198                 if (cur)
2199                         dev->data->dev_conf.rxmode.offloads |=
2200                                 DEV_RX_OFFLOAD_VLAN_STRIP;
2201                 else
2202                         dev->data->dev_conf.rxmode.offloads &=
2203                                 ~DEV_RX_OFFLOAD_VLAN_STRIP;
2204                 mask |= ETH_VLAN_STRIP_MASK;
2205         }
2206
2207         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
2208         org = !!(dev->data->dev_conf.rxmode.offloads &
2209                  DEV_RX_OFFLOAD_VLAN_FILTER);
2210         if (cur != org) {
2211                 if (cur)
2212                         dev->data->dev_conf.rxmode.offloads |=
2213                                 DEV_RX_OFFLOAD_VLAN_FILTER;
2214                 else
2215                         dev->data->dev_conf.rxmode.offloads &=
2216                                 ~DEV_RX_OFFLOAD_VLAN_FILTER;
2217                 mask |= ETH_VLAN_FILTER_MASK;
2218         }
2219
2220         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
2221         org = !!(dev->data->dev_conf.rxmode.offloads &
2222                  DEV_RX_OFFLOAD_VLAN_EXTEND);
2223         if (cur != org) {
2224                 if (cur)
2225                         dev->data->dev_conf.rxmode.offloads |=
2226                                 DEV_RX_OFFLOAD_VLAN_EXTEND;
2227                 else
2228                         dev->data->dev_conf.rxmode.offloads &=
2229                                 ~DEV_RX_OFFLOAD_VLAN_EXTEND;
2230                 mask |= ETH_VLAN_EXTEND_MASK;
2231         }
2232
2233         /* no change */
2234         if (mask == 0)
2235                 return ret;
2236
2237         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
2238
2239         /*
2240          * Convert to the offload bitfield API just in case the underlying PMD
2241          * still supports it.
2242          */
2243         rte_eth_convert_rx_offloads(dev->data->dev_conf.rxmode.offloads,
2244                                     &dev->data->dev_conf.rxmode);
2245         ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
2246         if (ret) {
2247                 /* hit an error, restore original values */
2248                 dev->data->dev_conf.rxmode.offloads = orig_offloads;
2249                 rte_eth_convert_rx_offloads(dev->data->dev_conf.rxmode.offloads,
2250                                             &dev->data->dev_conf.rxmode);
2251         }
2252
2253         return ret;
2254 }
2255
2256 int
2257 rte_eth_dev_get_vlan_offload(uint16_t port_id)
2258 {
2259         struct rte_eth_dev *dev;
2260         int ret = 0;
2261
2262         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2263         dev = &rte_eth_devices[port_id];
2264
2265         if (dev->data->dev_conf.rxmode.offloads &
2266             DEV_RX_OFFLOAD_VLAN_STRIP)
2267                 ret |= ETH_VLAN_STRIP_OFFLOAD;
2268
2269         if (dev->data->dev_conf.rxmode.offloads &
2270             DEV_RX_OFFLOAD_VLAN_FILTER)
2271                 ret |= ETH_VLAN_FILTER_OFFLOAD;
2272
2273         if (dev->data->dev_conf.rxmode.offloads &
2274             DEV_RX_OFFLOAD_VLAN_EXTEND)
2275                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
2276
2277         return ret;
2278 }
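
/*
 * Usage sketch (illustrative): enable VLAN stripping on top of whatever
 * offloads are already configured, using the get/modify/set pattern.
 *
 *     int mask = rte_eth_dev_get_vlan_offload(port_id);
 *
 *     mask |= ETH_VLAN_STRIP_OFFLOAD;
 *     rte_eth_dev_set_vlan_offload(port_id, mask);
 */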
2279
2280 int
2281 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
2282 {
2283         struct rte_eth_dev *dev;
2284
2285         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2286         dev = &rte_eth_devices[port_id];
2287         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
2288         (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on);
2289
2290         return 0;
2291 }
2292
2293 int
2294 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
2295 {
2296         struct rte_eth_dev *dev;
2297
2298         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2299         dev = &rte_eth_devices[port_id];
2300         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
2301         memset(fc_conf, 0, sizeof(*fc_conf));
2302         return (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf);
2303 }
2304
2305 int
2306 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
2307 {
2308         struct rte_eth_dev *dev;
2309
2310         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2311         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
2312                 RTE_PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
2313                 return -EINVAL;
2314         }
2315
2316         dev = &rte_eth_devices[port_id];
2317         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
2318         return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
2319 }
2320
2321 int
2322 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
2323                                    struct rte_eth_pfc_conf *pfc_conf)
2324 {
2325         struct rte_eth_dev *dev;
2326
2327         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2328         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
2329                 RTE_PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
2330                 return -EINVAL;
2331         }
2332
2333         dev = &rte_eth_devices[port_id];
2334         /* High water, low water validation are device specific */
2335         if (*dev->dev_ops->priority_flow_ctrl_set)
2336                 return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
2337         return -ENOTSUP;
2338 }
2339
2340 static int
2341 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
2342                         uint16_t reta_size)
2343 {
2344         uint16_t i, num;
2345
2346         if (!reta_conf)
2347                 return -EINVAL;
2348
2349         num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
2350         for (i = 0; i < num; i++) {
2351                 if (reta_conf[i].mask)
2352                         return 0;
2353         }
2354
2355         return -EINVAL;
2356 }
2357
2358 static int
2359 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
2360                          uint16_t reta_size,
2361                          uint16_t max_rxq)
2362 {
2363         uint16_t i, idx, shift;
2364
2365         if (!reta_conf)
2366                 return -EINVAL;
2367
2368         if (max_rxq == 0) {
2369                 RTE_PMD_DEBUG_TRACE("No receive queue is available\n");
2370                 return -EINVAL;
2371         }
2372
2373         for (i = 0; i < reta_size; i++) {
2374                 idx = i / RTE_RETA_GROUP_SIZE;
2375                 shift = i % RTE_RETA_GROUP_SIZE;
2376                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
2377                         (reta_conf[idx].reta[shift] >= max_rxq)) {
2378                         RTE_PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
2379                                 "the maximum rxq index: %u\n", idx, shift,
2380                                 reta_conf[idx].reta[shift], max_rxq);
2381                         return -EINVAL;
2382                 }
2383         }
2384
2385         return 0;
2386 }
2387
2388 int
2389 rte_eth_dev_rss_reta_update(uint16_t port_id,
2390                             struct rte_eth_rss_reta_entry64 *reta_conf,
2391                             uint16_t reta_size)
2392 {
2393         struct rte_eth_dev *dev;
2394         int ret;
2395
2396         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2397         /* Check mask bits */
2398         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2399         if (ret < 0)
2400                 return ret;
2401
2402         dev = &rte_eth_devices[port_id];
2403
2404         /* Check entry value */
2405         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
2406                                 dev->data->nb_rx_queues);
2407         if (ret < 0)
2408                 return ret;
2409
2410         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
2411         return (*dev->dev_ops->reta_update)(dev, reta_conf, reta_size);
2412 }
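
/*
 * Usage sketch (illustrative): spread the redirection table round-robin over
 * nb_queues RX queues.  reta_size is assumed to be the dev_info.reta_size of
 * the port and a multiple of RTE_RETA_GROUP_SIZE.
 *
 *     struct rte_eth_rss_reta_entry64 reta[reta_size / RTE_RETA_GROUP_SIZE];
 *     uint16_t i;
 *
 *     memset(reta, 0, sizeof(reta));
 *     for (i = 0; i < reta_size; i++) {
 *             reta[i / RTE_RETA_GROUP_SIZE].mask |= 1ULL << (i % RTE_RETA_GROUP_SIZE);
 *             reta[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] = i % nb_queues;
 *     }
 *     rte_eth_dev_rss_reta_update(port_id, reta, reta_size);
 */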
2413
2414 int
2415 rte_eth_dev_rss_reta_query(uint16_t port_id,
2416                            struct rte_eth_rss_reta_entry64 *reta_conf,
2417                            uint16_t reta_size)
2418 {
2419         struct rte_eth_dev *dev;
2420         int ret;
2421
2422         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2423
2424         /* Check mask bits */
2425         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2426         if (ret < 0)
2427                 return ret;
2428
2429         dev = &rte_eth_devices[port_id];
2430         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
2431         return (*dev->dev_ops->reta_query)(dev, reta_conf, reta_size);
2432 }
2433
2434 int
2435 rte_eth_dev_rss_hash_update(uint16_t port_id,
2436                             struct rte_eth_rss_conf *rss_conf)
2437 {
2438         struct rte_eth_dev *dev;
2439
2440         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2441         dev = &rte_eth_devices[port_id];
2442         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
2443         return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
2444 }
2445
2446 int
2447 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
2448                               struct rte_eth_rss_conf *rss_conf)
2449 {
2450         struct rte_eth_dev *dev;
2451
2452         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2453         dev = &rte_eth_devices[port_id];
2454         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
2455         return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
2456 }
2457
2458 int
2459 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
2460                                 struct rte_eth_udp_tunnel *udp_tunnel)
2461 {
2462         struct rte_eth_dev *dev;
2463
2464         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2465         if (udp_tunnel == NULL) {
2466                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2467                 return -EINVAL;
2468         }
2469
2470         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2471                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
2472                 return -EINVAL;
2473         }
2474
2475         dev = &rte_eth_devices[port_id];
2476         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
2477         return (*dev->dev_ops->udp_tunnel_port_add)(dev, udp_tunnel);
2478 }
2479
2480 int
2481 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
2482                                    struct rte_eth_udp_tunnel *udp_tunnel)
2483 {
2484         struct rte_eth_dev *dev;
2485
2486         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2487         dev = &rte_eth_devices[port_id];
2488
2489         if (udp_tunnel == NULL) {
2490                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2491                 return -EINVAL;
2492         }
2493
2494         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2495                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
2496                 return -EINVAL;
2497         }
2498
2499         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
2500         return (*dev->dev_ops->udp_tunnel_port_del)(dev, udp_tunnel);
2501 }
2502
2503 int
2504 rte_eth_led_on(uint16_t port_id)
2505 {
2506         struct rte_eth_dev *dev;
2507
2508         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2509         dev = &rte_eth_devices[port_id];
2510         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
2511         return (*dev->dev_ops->dev_led_on)(dev);
2512 }
2513
2514 int
2515 rte_eth_led_off(uint16_t port_id)
2516 {
2517         struct rte_eth_dev *dev;
2518
2519         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2520         dev = &rte_eth_devices[port_id];
2521         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
2522         return (*dev->dev_ops->dev_led_off)(dev);
2523 }
2524
2525 /*
2526  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2527  * an empty spot.
2528  */
2529 static int
2530 get_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
2531 {
2532         struct rte_eth_dev_info dev_info;
2533         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2534         unsigned i;
2535
2536         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2537         rte_eth_dev_info_get(port_id, &dev_info);
2538
2539         for (i = 0; i < dev_info.max_mac_addrs; i++)
2540                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
2541                         return i;
2542
2543         return -1;
2544 }
2545
2546 static const struct ether_addr null_mac_addr;
2547
2548 int
2549 rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *addr,
2550                         uint32_t pool)
2551 {
2552         struct rte_eth_dev *dev;
2553         int index;
2554         uint64_t pool_mask;
2555         int ret;
2556
2557         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2558         dev = &rte_eth_devices[port_id];
2559         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
2560
2561         if (is_zero_ether_addr(addr)) {
2562                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2563                         port_id);
2564                 return -EINVAL;
2565         }
2566         if (pool >= ETH_64_POOLS) {
2567                 RTE_PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
2568                 return -EINVAL;
2569         }
2570
2571         index = get_mac_addr_index(port_id, addr);
2572         if (index < 0) {
2573                 index = get_mac_addr_index(port_id, &null_mac_addr);
2574                 if (index < 0) {
2575                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2576                                 port_id);
2577                         return -ENOSPC;
2578                 }
2579         } else {
2580                 pool_mask = dev->data->mac_pool_sel[index];
2581
2582                 /* Check if both MAC address and pool are already present; if so, do nothing */
2583                 if (pool_mask & (1ULL << pool))
2584                         return 0;
2585         }
2586
2587         /* Update NIC */
2588         ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
2589
2590         if (ret == 0) {
2591                 /* Update address in NIC data structure */
2592                 ether_addr_copy(addr, &dev->data->mac_addrs[index]);
2593
2594                 /* Update pool bitmap in NIC data structure */
2595                 dev->data->mac_pool_sel[index] |= (1ULL << pool);
2596         }
2597
2598         return ret;
2599 }
2600
2601 int
2602 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *addr)
2603 {
2604         struct rte_eth_dev *dev;
2605         int index;
2606
2607         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2608         dev = &rte_eth_devices[port_id];
2609         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
2610
2611         index = get_mac_addr_index(port_id, addr);
2612         if (index == 0) {
2613                 RTE_PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
2614                 return -EADDRINUSE;
2615         } else if (index < 0)
2616                 return 0;  /* Do nothing if address wasn't found */
2617
2618         /* Update NIC */
2619         (*dev->dev_ops->mac_addr_remove)(dev, index);
2620
2621         /* Update address in NIC data structure */
2622         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
2623
2624         /* reset pool bitmap */
2625         dev->data->mac_pool_sel[index] = 0;
2626
2627         return 0;
2628 }
2629
2630 int
2631 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct ether_addr *addr)
2632 {
2633         struct rte_eth_dev *dev;
2634
2635         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2636
2637         if (!is_valid_assigned_ether_addr(addr))
2638                 return -EINVAL;
2639
2640         dev = &rte_eth_devices[port_id];
2641         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
2642
2643         /* Update default address in NIC data structure */
2644         ether_addr_copy(addr, &dev->data->mac_addrs[0]);
2645
2646         (*dev->dev_ops->mac_addr_set)(dev, addr);
2647
2648         return 0;
2649 }
2650
2651
2652 /*
2653  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2654  * an empty spot.
2655  */
2656 static int
2657 get_hash_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
2658 {
2659         struct rte_eth_dev_info dev_info;
2660         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2661         unsigned i;
2662
2663         rte_eth_dev_info_get(port_id, &dev_info);
2664         if (!dev->data->hash_mac_addrs)
2665                 return -1;
2666
2667         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
2668                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
2669                         ETHER_ADDR_LEN) == 0)
2670                         return i;
2671
2672         return -1;
2673 }
2674
2675 int
2676 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr,
2677                                 uint8_t on)
2678 {
2679         int index;
2680         int ret;
2681         struct rte_eth_dev *dev;
2682
2683         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2684
2685         dev = &rte_eth_devices[port_id];
2686         if (is_zero_ether_addr(addr)) {
2687                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2688                         port_id);
2689                 return -EINVAL;
2690         }
2691
2692         index = get_hash_mac_addr_index(port_id, addr);
2693         /* Check if it's already there, and do nothing */
2694         if ((index >= 0) && on)
2695                 return 0;
2696
2697         if (index < 0) {
2698                 if (!on) {
2699                         RTE_PMD_DEBUG_TRACE("port %d: the MAC address was not "
2700                                 "set in UTA\n", port_id);
2701                         return -EINVAL;
2702                 }
2703
2704                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
2705                 if (index < 0) {
2706                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2707                                         port_id);
2708                         return -ENOSPC;
2709                 }
2710         }
2711
2712         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
2713         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
2714         if (ret == 0) {
2715                 /* Update address in NIC data structure */
2716                 if (on)
2717                         ether_addr_copy(addr,
2718                                         &dev->data->hash_mac_addrs[index]);
2719                 else
2720                         ether_addr_copy(&null_mac_addr,
2721                                         &dev->data->hash_mac_addrs[index]);
2722         }
2723
2724         return ret;
2725 }
2726
2727 int
2728 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
2729 {
2730         struct rte_eth_dev *dev;
2731
2732         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2733
2734         dev = &rte_eth_devices[port_id];
2735
2736         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
2737         return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
2738 }
2739
2740 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
2741                                         uint16_t tx_rate)
2742 {
2743         struct rte_eth_dev *dev;
2744         struct rte_eth_dev_info dev_info;
2745         struct rte_eth_link link;
2746
2747         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2748
2749         dev = &rte_eth_devices[port_id];
2750         rte_eth_dev_info_get(port_id, &dev_info);
2751         link = dev->data->dev_link;
2752
2753         if (queue_idx > dev_info.max_tx_queues) {
2754                 RTE_PMD_DEBUG_TRACE("set queue rate limit:port %d: "
2755                                 "invalid queue id=%d\n", port_id, queue_idx);
2756                 return -EINVAL;
2757         }
2758
2759         if (tx_rate > link.link_speed) {
2760                 RTE_PMD_DEBUG_TRACE("set queue rate limit:invalid tx_rate=%d, "
2761                                 "bigger than link speed= %d\n",
2762                         tx_rate, link.link_speed);
2763                 return -EINVAL;
2764         }
2765
2766         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
2767         return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate);
2768 }
2769
2770 int
2771 rte_eth_mirror_rule_set(uint16_t port_id,
2772                         struct rte_eth_mirror_conf *mirror_conf,
2773                         uint8_t rule_id, uint8_t on)
2774 {
2775         struct rte_eth_dev *dev;
2776
2777         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2778         if (mirror_conf->rule_type == 0) {
2779                 RTE_PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
2780                 return -EINVAL;
2781         }
2782
2783         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
2784                 RTE_PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
2785                                 ETH_64_POOLS - 1);
2786                 return -EINVAL;
2787         }
2788
2789         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
2790              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
2791             (mirror_conf->pool_mask == 0)) {
2792                 RTE_PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not be 0.\n");
2793                 return -EINVAL;
2794         }
2795
2796         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
2797             mirror_conf->vlan.vlan_mask == 0) {
2798                 RTE_PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask can not be 0.\n");
2799                 return -EINVAL;
2800         }
2801
2802         dev = &rte_eth_devices[port_id];
2803         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
2804
2805         return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
2806 }
2807
2808 int
2809 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
2810 {
2811         struct rte_eth_dev *dev;
2812
2813         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2814
2815         dev = &rte_eth_devices[port_id];
2816         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
2817
2818         return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
2819 }
2820
2821 int
2822 rte_eth_dev_callback_register(uint16_t port_id,
2823                         enum rte_eth_event_type event,
2824                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2825 {
2826         struct rte_eth_dev *dev;
2827         struct rte_eth_dev_callback *user_cb;
2828
2829         if (!cb_fn)
2830                 return -EINVAL;
2831
2832         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2833
2834         dev = &rte_eth_devices[port_id];
2835         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2836
2837         TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
2838                 if (user_cb->cb_fn == cb_fn &&
2839                         user_cb->cb_arg == cb_arg &&
2840                         user_cb->event == event) {
2841                         break;
2842                 }
2843         }
2844
2845         /* create a new callback. */
2846         if (user_cb == NULL) {
2847                 user_cb = rte_zmalloc("INTR_USER_CALLBACK",
2848                                         sizeof(struct rte_eth_dev_callback), 0);
2849                 if (user_cb != NULL) {
2850                         user_cb->cb_fn = cb_fn;
2851                         user_cb->cb_arg = cb_arg;
2852                         user_cb->event = event;
2853                         TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
2854                 }
2855         }
2856
2857         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2858         return (user_cb == NULL) ? -ENOMEM : 0;
2859 }
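
/*
 * Usage sketch (illustrative): register a link-state-change callback.
 * lsc_event_cb is a hypothetical application function matching the
 * rte_eth_dev_cb_fn prototype.
 *
 *     static int
 *     lsc_event_cb(uint16_t port_id, enum rte_eth_event_type type,
 *                  void *cb_arg, void *ret_param)
 *     {
 *             ...handle RTE_ETH_EVENT_INTR_LSC...
 *             return 0;
 *     }
 *
 *     rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *                                   lsc_event_cb, NULL);
 */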
2860
2861 int
2862 rte_eth_dev_callback_unregister(uint16_t port_id,
2863                         enum rte_eth_event_type event,
2864                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2865 {
2866         int ret;
2867         struct rte_eth_dev *dev;
2868         struct rte_eth_dev_callback *cb, *next;
2869
2870         if (!cb_fn)
2871                 return -EINVAL;
2872
2873         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2874
2875         dev = &rte_eth_devices[port_id];
2876         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2877
2878         ret = 0;
2879         for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {
2880
2881                 next = TAILQ_NEXT(cb, next);
2882
2883                 if (cb->cb_fn != cb_fn || cb->event != event ||
2884                                 (cb->cb_arg != (void *)-1 &&
2885                                 cb->cb_arg != cb_arg))
2886                         continue;
2887
2888                 /*
2889                  * if this callback is not executing right now,
2890                  * then remove it.
2891                  */
2892                 if (cb->active == 0) {
2893                         TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
2894                         rte_free(cb);
2895                 } else {
2896                         ret = -EAGAIN;
2897                 }
2898         }
2899
2900         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2901         return ret;
2902 }
2903
2904 int
2905 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
2906         enum rte_eth_event_type event, void *cb_arg, void *ret_param)
2907 {
2908         struct rte_eth_dev_callback *cb_lst;
2909         struct rte_eth_dev_callback dev_cb;
2910         int rc = 0;
2911
2912         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2913         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
2914                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
2915                         continue;
2916                 dev_cb = *cb_lst;
2917                 cb_lst->active = 1;
2918                 if (cb_arg != NULL)
2919                         dev_cb.cb_arg = cb_arg;
2920                 if (ret_param != NULL)
2921                         dev_cb.ret_param = ret_param;
2922
2923                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2924                 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
2925                                 dev_cb.cb_arg, dev_cb.ret_param);
2926                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
2927                 cb_lst->active = 0;
2928         }
2929         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2930         return rc;
2931 }
2932
2933 int
2934 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
2935 {
2936         uint32_t vec;
2937         struct rte_eth_dev *dev;
2938         struct rte_intr_handle *intr_handle;
2939         uint16_t qid;
2940         int rc;
2941
2942         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2943
2944         dev = &rte_eth_devices[port_id];
2945
2946         if (!dev->intr_handle) {
2947                 RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
2948                 return -ENOTSUP;
2949         }
2950
2951         intr_handle = dev->intr_handle;
2952         if (!intr_handle->intr_vec) {
2953                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
2954                 return -EPERM;
2955         }
2956
2957         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
2958                 vec = intr_handle->intr_vec[qid];
2959                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
2960                 if (rc && rc != -EEXIST) {
2961                         RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
2962                                         " op %d epfd %d vec %u\n",
2963                                         port_id, qid, op, epfd, vec);
2964                 }
2965         }
2966
2967         return 0;
2968 }
2969
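/*
 * Helper used by PMDs to allocate DMA memory for descriptor rings.  The
 * memzone name is derived from the driver name, ring name, port id and
 * queue id, so re-initializing the same queue finds and reuses the zone
 * reserved earlier instead of allocating a new one.
 */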
2970 const struct rte_memzone *
2971 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
2972                          uint16_t queue_id, size_t size, unsigned align,
2973                          int socket_id)
2974 {
2975         char z_name[RTE_MEMZONE_NAMESIZE];
2976         const struct rte_memzone *mz;
2977
2978         snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
2979                  dev->device->driver->name, ring_name,
2980                  dev->data->port_id, queue_id);
2981
2982         mz = rte_memzone_lookup(z_name);
2983         if (mz)
2984                 return mz;
2985
2986         return rte_memzone_reserve_aligned(z_name, size, socket_id, 0, align);
2987 }
2988
2989 int
2990 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
2991                           int epfd, int op, void *data)
2992 {
2993         uint32_t vec;
2994         struct rte_eth_dev *dev;
2995         struct rte_intr_handle *intr_handle;
2996         int rc;
2997
2998         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2999
3000         dev = &rte_eth_devices[port_id];
3001         if (queue_id >= dev->data->nb_rx_queues) {
3002                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
3003                 return -EINVAL;
3004         }
3005
3006         if (!dev->intr_handle) {
3007                 RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
3008                 return -ENOTSUP;
3009         }
3010
3011         intr_handle = dev->intr_handle;
3012         if (!intr_handle->intr_vec) {
3013                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
3014                 return -EPERM;
3015         }
3016
3017         vec = intr_handle->intr_vec[queue_id];
3018         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
3019         if (rc && rc != -EEXIST) {
3020                 RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
3021                                 " op %d epfd %d vec %u\n",
3022                                 port_id, queue_id, op, epfd, vec);
3023                 return rc;
3024         }
3025
3026         return 0;
3027 }
3028
3029 int
3030 rte_eth_dev_rx_intr_enable(uint16_t port_id,
3031                            uint16_t queue_id)
3032 {
3033         struct rte_eth_dev *dev;
3034
3035         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3036
3037         dev = &rte_eth_devices[port_id];
3038
3039         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
3040         return (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id);
3041 }
3042
3043 int
3044 rte_eth_dev_rx_intr_disable(uint16_t port_id,
3045                             uint16_t queue_id)
3046 {
3047         struct rte_eth_dev *dev;
3048
3049         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3050
3051         dev = &rte_eth_devices[port_id];
3052
3053         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
3054         return (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id);
3055 }
3056
3057
3058 int
3059 rte_eth_dev_filter_supported(uint16_t port_id,
3060                              enum rte_filter_type filter_type)
3061 {
3062         struct rte_eth_dev *dev;
3063
3064         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3065
3066         dev = &rte_eth_devices[port_id];
3067         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3068         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3069                                 RTE_ETH_FILTER_NOP, NULL);
3070 }
3071
3072 int
3073 rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
3074                        enum rte_filter_op filter_op, void *arg)
3075 {
3076         struct rte_eth_dev *dev;
3077
3078         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3079
3080         dev = &rte_eth_devices[port_id];
3081         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3082         return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op, arg);
3083 }
3084
3085 void *
3086 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
3087                 rte_rx_callback_fn fn, void *user_param)
3088 {
3089 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3090         rte_errno = ENOTSUP;
3091         return NULL;
3092 #endif
3093         /* check input parameters */
3094         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3095                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3096                 rte_errno = EINVAL;
3097                 return NULL;
3098         }
3099         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3100
3101         if (cb == NULL) {
3102                 rte_errno = ENOMEM;
3103                 return NULL;
3104         }
3105
3106         cb->fn.rx = fn;
3107         cb->param = user_param;
3108
3109         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3110         /* Add the callbacks in fifo order. */
3111         struct rte_eth_rxtx_callback *tail =
3112                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3113
3114         if (!tail) {
3115                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3116
3117         } else {
3118                 while (tail->next)
3119                         tail = tail->next;
3120                 tail->next = cb;
3121         }
3122         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3123
3124         return cb;
3125 }
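/*
 * Illustrative usage sketch (not part of this file): a post-RX callback is
 * invoked from rte_eth_rx_burst() with the burst returned by the PMD and
 * may inspect or modify it.  The packet counter used as user_param here is
 * hypothetical.
 *
 *	static uint16_t
 *	count_rx_cb(uint16_t port, uint16_t queue, struct rte_mbuf *pkts[],
 *		    uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
 *	{
 *		uint64_t *counter = user_param;
 *
 *		RTE_SET_USED(port);
 *		RTE_SET_USED(queue);
 *		RTE_SET_USED(max_pkts);
 *		RTE_SET_USED(pkts);
 *		*counter += nb_pkts;
 *		return nb_pkts;
 *	}
 *
 *	static uint64_t rx_count;
 *	void *cb = rte_eth_add_rx_callback(port_id, 0, count_rx_cb, &rx_count);
 *
 *	if (cb == NULL)
 *		printf("callback not added, rte_errno=%d\n", rte_errno);
 */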
3126
3127 void *
3128 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
3129                 rte_rx_callback_fn fn, void *user_param)
3130 {
3131 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3132         rte_errno = ENOTSUP;
3133         return NULL;
3134 #endif
3135         /* check input parameters */
3136         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3137                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3138                 rte_errno = EINVAL;
3139                 return NULL;
3140         }
3141
3142         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3143
3144         if (cb == NULL) {
3145                 rte_errno = ENOMEM;
3146                 return NULL;
3147         }
3148
3149         cb->fn.rx = fn;
3150         cb->param = user_param;
3151
3152         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3153         /* Add the callbacks at first position. */
3154         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3155         rte_smp_wmb();
3156         rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3157         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3158
3159         return cb;
3160 }
3161
3162 void *
3163 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
3164                 rte_tx_callback_fn fn, void *user_param)
3165 {
3166 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3167         rte_errno = ENOTSUP;
3168         return NULL;
3169 #endif
3170         /* check input parameters */
3171         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3172                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
3173                 rte_errno = EINVAL;
3174                 return NULL;
3175         }
3176
3177         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3178
3179         if (cb == NULL) {
3180                 rte_errno = ENOMEM;
3181                 return NULL;
3182         }
3183
3184         cb->fn.tx = fn;
3185         cb->param = user_param;
3186
3187         rte_spinlock_lock(&rte_eth_tx_cb_lock);
3188         /* Add the callbacks in fifo order. */
3189         struct rte_eth_rxtx_callback *tail =
3190                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
3191
3192         if (!tail) {
3193                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
3194
3195         } else {
3196                 while (tail->next)
3197                         tail = tail->next;
3198                 tail->next = cb;
3199         }
3200         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3201
3202         return cb;
3203 }
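/*
 * Illustrative usage sketch (not part of this file): a pre-TX callback has
 * the same shape minus the max_pkts argument and runs inside
 * rte_eth_tx_burst() before the packets reach the PMD; the count it
 * returns is the number of packets actually handed to the driver.  The
 * names below are hypothetical.
 *
 *	static uint16_t
 *	last_burst_cb(uint16_t port, uint16_t queue, struct rte_mbuf *pkts[],
 *		      uint16_t nb_pkts, void *user_param)
 *	{
 *		uint16_t *last = user_param;
 *
 *		RTE_SET_USED(port);
 *		RTE_SET_USED(queue);
 *		RTE_SET_USED(pkts);
 *		*last = nb_pkts;
 *		return nb_pkts;
 *	}
 *
 *	static uint16_t last_tx_burst;
 *	rte_eth_add_tx_callback(port_id, 0, last_burst_cb, &last_tx_burst);
 */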
3204
3205 int
3206 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
3207                 struct rte_eth_rxtx_callback *user_cb)
3208 {
3209 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3210         return -ENOTSUP;
3211 #endif
3212         /* Check input parameters. */
3213         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3214         if (user_cb == NULL ||
3215                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
3216                 return -EINVAL;
3217
3218         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3219         struct rte_eth_rxtx_callback *cb;
3220         struct rte_eth_rxtx_callback **prev_cb;
3221         int ret = -EINVAL;
3222
3223         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3224         prev_cb = &dev->post_rx_burst_cbs[queue_id];
3225         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3226                 cb = *prev_cb;
3227                 if (cb == user_cb) {
3228                         /* Remove the user cb from the callback list. */
3229                         *prev_cb = cb->next;
3230                         ret = 0;
3231                         break;
3232                 }
3233         }
3234         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3235
3236         return ret;
3237 }
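/*
 * Note on removal: the function above only unlinks the callback; it does
 * not free it, since a datapath thread may still be walking the list.  A
 * plausible pattern (application-specific synchronisation assumed) is to
 * remove the callback, wait until no lcore can still be inside
 * rte_eth_rx_burst() for that queue, and only then release the pointer
 * returned by rte_eth_add_rx_callback() with rte_free().
 * wait_for_datapath_quiescence() below is a hypothetical placeholder for
 * that synchronisation.
 *
 *	if (rte_eth_remove_rx_callback(port_id, 0, cb) == 0) {
 *		wait_for_datapath_quiescence();
 *		rte_free(cb);
 *	}
 */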
3238
3239 int
3240 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
3241                 struct rte_eth_rxtx_callback *user_cb)
3242 {
3243 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3244         return -ENOTSUP;
3245 #endif
3246         /* Check input parameters. */
3247         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3248         if (user_cb == NULL ||
3249                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
3250                 return -EINVAL;
3251
3252         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3253         int ret = -EINVAL;
3254         struct rte_eth_rxtx_callback *cb;
3255         struct rte_eth_rxtx_callback **prev_cb;
3256
3257         rte_spinlock_lock(&rte_eth_tx_cb_lock);
3258         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
3259         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3260                 cb = *prev_cb;
3261                 if (cb == user_cb) {
3262                         /* Remove the user cb from the callback list. */
3263                         *prev_cb = cb->next;
3264                         ret = 0;
3265                         break;
3266                 }
3267         }
3268         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3269
3270         return ret;
3271 }
3272
3273 int
3274 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3275         struct rte_eth_rxq_info *qinfo)
3276 {
3277         struct rte_eth_dev *dev;
3278
3279         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3280
3281         if (qinfo == NULL)
3282                 return -EINVAL;
3283
3284         dev = &rte_eth_devices[port_id];
3285         if (queue_id >= dev->data->nb_rx_queues) {
3286                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
3287                 return -EINVAL;
3288         }
3289
3290         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
3291
3292         memset(qinfo, 0, sizeof(*qinfo));
3293         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
3294         return 0;
3295 }
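/*
 * Illustrative usage sketch (not part of this file): dump the basic RX
 * queue parameters reported by the PMD for queue 0 of a hypothetical
 * port_id.
 *
 *	struct rte_eth_rxq_info qinfo;
 *
 *	if (rte_eth_rx_queue_info_get(port_id, 0, &qinfo) == 0)
 *		printf("rxq0: %u descriptors, scattered_rx=%u, pool=%s\n",
 *		       qinfo.nb_desc, qinfo.scattered_rx,
 *		       qinfo.mp != NULL ? qinfo.mp->name : "n/a");
 */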
3296
3297 int
3298 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3299         struct rte_eth_txq_info *qinfo)
3300 {
3301         struct rte_eth_dev *dev;
3302
3303         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3304
3305         if (qinfo == NULL)
3306                 return -EINVAL;
3307
3308         dev = &rte_eth_devices[port_id];
3309         if (queue_id >= dev->data->nb_tx_queues) {
3310                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%u\n", queue_id);
3311                 return -EINVAL;
3312         }
3313
3314         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
3315
3316         memset(qinfo, 0, sizeof(*qinfo));
3317         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
3318         return 0;
3319 }
3320
3321 int
3322 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
3323                              struct ether_addr *mc_addr_set,
3324                              uint32_t nb_mc_addr)
3325 {
3326         struct rte_eth_dev *dev;
3327
3328         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3329
3330         dev = &rte_eth_devices[port_id];
3331         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
3332         return dev->dev_ops->set_mc_addr_list(dev, mc_addr_set, nb_mc_addr);
3333 }
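/*
 * Illustrative usage sketch (not part of this file): replace the port's
 * multicast MAC filter with a single entry; the IPv4 all-hosts group
 * address 01:00:5e:00:00:01 is just an example, and passing nb_mc_addr=0
 * flushes the list.
 *
 *	static struct ether_addr mc_list[] = {
 *		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } },
 *	};
 *
 *	rte_eth_dev_set_mc_addr_list(port_id, mc_list, RTE_DIM(mc_list));
 */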
3334
3335 int
3336 rte_eth_timesync_enable(uint16_t port_id)
3337 {
3338         struct rte_eth_dev *dev;
3339
3340         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3341         dev = &rte_eth_devices[port_id];
3342
3343         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
3344         return (*dev->dev_ops->timesync_enable)(dev);
3345 }
3346
3347 int
3348 rte_eth_timesync_disable(uint16_t port_id)
3349 {
3350         struct rte_eth_dev *dev;
3351
3352         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3353         dev = &rte_eth_devices[port_id];
3354
3355         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
3356         return (*dev->dev_ops->timesync_disable)(dev);
3357 }
3358
3359 int
3360 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
3361                                    uint32_t flags)
3362 {
3363         struct rte_eth_dev *dev;
3364
3365         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3366         dev = &rte_eth_devices[port_id];
3367
3368         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
3369         return (*dev->dev_ops->timesync_read_rx_timestamp)(dev, timestamp, flags);
3370 }
3371
3372 int
3373 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
3374                                    struct timespec *timestamp)
3375 {
3376         struct rte_eth_dev *dev;
3377
3378         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3379         dev = &rte_eth_devices[port_id];
3380
3381         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
3382         return (*dev->dev_ops->timesync_read_tx_timestamp)(dev, timestamp);
3383 }
3384
3385 int
3386 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
3387 {
3388         struct rte_eth_dev *dev;
3389
3390         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3391         dev = &rte_eth_devices[port_id];
3392
3393         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
3394         return (*dev->dev_ops->timesync_adjust_time)(dev, delta);
3395 }
3396
3397 int
3398 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
3399 {
3400         struct rte_eth_dev *dev;
3401
3402         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3403         dev = &rte_eth_devices[port_id];
3404
3405         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
3406         return (*dev->dev_ops->timesync_read_time)(dev, timestamp);
3407 }
3408
3409 int
3410 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
3411 {
3412         struct rte_eth_dev *dev;
3413
3414         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3415         dev = &rte_eth_devices[port_id];
3416
3417         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
3418         return (*dev->dev_ops->timesync_write_time)(dev, timestamp);
3419 }
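/*
 * Illustrative usage sketch (not part of this file): once timesync is
 * enabled the device clock can be read, stepped or slewed through the
 * calls above.  The 1000 ns adjustment is arbitrary.
 *
 *	struct timespec ts;
 *
 *	rte_eth_timesync_enable(port_id);
 *	rte_eth_timesync_read_time(port_id, &ts);
 *	rte_eth_timesync_adjust_time(port_id, 1000);
 */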
3420
3421 int
3422 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
3423 {
3424         struct rte_eth_dev *dev;
3425
3426         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3427
3428         dev = &rte_eth_devices[port_id];
3429         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
3430         return (*dev->dev_ops->get_reg)(dev, info);
3431 }
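/*
 * Illustrative usage sketch (not part of this file): a common two-pass
 * pattern (used e.g. by the ethtool example application) is to call with
 * info.data set to NULL so the driver reports length and width, then
 * allocate a buffer and fetch the registers.  Whether a given PMD fills in
 * these fields is a driver property, not guaranteed by this wrapper.
 *
 *	struct rte_dev_reg_info reg_info;
 *
 *	memset(&reg_info, 0, sizeof(reg_info));
 *	if (rte_eth_dev_get_reg_info(port_id, &reg_info) == 0) {
 *		reg_info.data = malloc((size_t)reg_info.length * reg_info.width);
 *		if (reg_info.data != NULL)
 *			rte_eth_dev_get_reg_info(port_id, &reg_info);
 *	}
 */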
3432
3433 int
3434 rte_eth_dev_get_eeprom_length(uint16_t port_id)
3435 {
3436         struct rte_eth_dev *dev;
3437
3438         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3439
3440         dev = &rte_eth_devices[port_id];
3441         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
3442         return (*dev->dev_ops->get_eeprom_length)(dev);
3443 }
3444
3445 int
3446 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
3447 {
3448         struct rte_eth_dev *dev;
3449
3450         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3451
3452         dev = &rte_eth_devices[port_id];
3453         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
3454         return (*dev->dev_ops->get_eeprom)(dev, info);
3455 }
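/*
 * Illustrative usage sketch (not part of this file): read the whole EEPROM
 * by first querying its size.  The magic field is left at 0 for a read;
 * writes via rte_eth_dev_set_eeprom() typically require the device
 * specific magic value.
 *
 *	struct rte_dev_eeprom_info eeprom;
 *	int len = rte_eth_dev_get_eeprom_length(port_id);
 *
 *	if (len > 0) {
 *		memset(&eeprom, 0, sizeof(eeprom));
 *		eeprom.data = malloc(len);
 *		eeprom.offset = 0;
 *		eeprom.length = len;
 *		if (eeprom.data != NULL)
 *			rte_eth_dev_get_eeprom(port_id, &eeprom);
 *	}
 */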
3456
3457 int
3458 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
3459 {
3460         struct rte_eth_dev *dev;
3461
3462         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3463
3464         dev = &rte_eth_devices[port_id];
3465         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
3466         return (*dev->dev_ops->set_eeprom)(dev, info);
3467 }
3468
3469 int
3470 rte_eth_dev_get_dcb_info(uint16_t port_id,
3471                              struct rte_eth_dcb_info *dcb_info)
3472 {
3473         struct rte_eth_dev *dev;
3474
3475         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3476
3477         dev = &rte_eth_devices[port_id];
3478         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
3479
3480         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
3481         return (*dev->dev_ops->get_dcb_info)(dev, dcb_info);
3482 }
3483
3484 int
3485 rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
3486                                     struct rte_eth_l2_tunnel_conf *l2_tunnel)
3487 {
3488         struct rte_eth_dev *dev;
3489
3490         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3491         if (l2_tunnel == NULL) {
3492                 RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
3493                 return -EINVAL;
3494         }
3495
3496         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
3497                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
3498                 return -EINVAL;
3499         }
3500
3501         dev = &rte_eth_devices[port_id];
3502         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
3503                                 -ENOTSUP);
3504         return (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev, l2_tunnel);
3505 }
3506
3507 int
3508 rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
3509                                   struct rte_eth_l2_tunnel_conf *l2_tunnel,
3510                                   uint32_t mask,
3511                                   uint8_t en)
3512 {
3513         struct rte_eth_dev *dev;
3514
3515         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3516
3517         if (l2_tunnel == NULL) {
3518                 RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
3519                 return -EINVAL;
3520         }
3521
3522         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
3523                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type.\n");
3524                 return -EINVAL;
3525         }
3526
3527         if (mask == 0) {
3528                 RTE_PMD_DEBUG_TRACE("Mask should have a value.\n");
3529                 return -EINVAL;
3530         }
3531
3532         dev = &rte_eth_devices[port_id];
3533         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
3534                                 -ENOTSUP);
3535         return (*dev->dev_ops->l2_tunnel_offload_set)(dev, l2_tunnel, mask, en);
3536 }
3537
3538 static void
3539 rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc,
3540                            const struct rte_eth_desc_lim *desc_lim)
3541 {
3542         if (desc_lim->nb_align != 0)
3543                 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
3544
3545         if (desc_lim->nb_max != 0)
3546                 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
3547
3548         *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
3549 }
3550
3551 int
3552 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
3553                                  uint16_t *nb_rx_desc,
3554                                  uint16_t *nb_tx_desc)
3555 {
3556         struct rte_eth_dev *dev;
3557         struct rte_eth_dev_info dev_info;
3558
3559         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3560
3561         dev = &rte_eth_devices[port_id];
3562         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
3563
3564         rte_eth_dev_info_get(port_id, &dev_info);
3565
3566         if (nb_rx_desc != NULL)
3567                 rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
3568
3569         if (nb_tx_desc != NULL)
3570                 rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
3571
3572         return 0;
3573 }
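/*
 * Illustrative usage sketch (not part of this file): requested ring sizes
 * are aligned and clamped to the PMD limits before the queues are set up.
 * With hypothetical limits nb_align=32, nb_min=64 and nb_max=4096, a
 * request of 1000 descriptors becomes 1024.  port_id and mbuf_pool are
 * assumed to exist.
 *
 *	uint16_t nb_rxd = 1024, nb_txd = 1024;
 *
 *	rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
 *	rte_eth_rx_queue_setup(port_id, 0, nb_rxd,
 *			       rte_eth_dev_socket_id(port_id), NULL, mbuf_pool);
 *	rte_eth_tx_queue_setup(port_id, 0, nb_txd,
 *			       rte_eth_dev_socket_id(port_id), NULL);
 */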
3574
3575 int
3576 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
3577 {
3578         struct rte_eth_dev *dev;
3579
3580         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3581
3582         if (pool == NULL)
3583                 return -EINVAL;
3584
3585         dev = &rte_eth_devices[port_id];
3586
3587         if (*dev->dev_ops->pool_ops_supported == NULL)
3588                 return 1; /* all pools are supported */
3589
3590         return (*dev->dev_ops->pool_ops_supported)(dev, pool);
3591 }