ethdev: check queue id in Rx interrupt control
lib/librte_ethdev/rte_ethdev.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_kvargs.h>
#include <rte_class.h>
#include <rte_ether.h>
#include <rte_telemetry.h>

#include "rte_ethdev_trace.h"
#include "rte_ethdev.h"
#include "rte_ethdev_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove Rx callbacks */
static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove Tx callbacks */
static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for shared data allocation */
static rte_spinlock_t rte_eth_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

/* Shared memory between primary and secondary processes. */
static struct {
	uint64_t next_owner_id;
	rte_spinlock_t ownership_lock;
	struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
} *rte_eth_dev_shared_data;

static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
		rx_nombuf)},
};

#define RTE_NB_STATS RTE_DIM(rte_stats_strings)

static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS RTE_DIM(rte_rxq_stats_strings)

static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS RTE_DIM(rte_txq_stats_strings)

#define RTE_RX_OFFLOAD_BIT2STR(_name)	\
	{ DEV_RX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} rte_rx_offload_names[] = {
	RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
	RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
	RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
	RTE_RX_OFFLOAD_BIT2STR(SCATTER),
	RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
	RTE_RX_OFFLOAD_BIT2STR(SECURITY),
	RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
	RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
};

#undef RTE_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name)	\
	{ DEV_TX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} rte_tx_offload_names[] = {
	RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
	RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
	RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
	RTE_TX_OFFLOAD_BIT2STR(SECURITY),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
};

#undef RTE_TX_OFFLOAD_BIT2STR

/**
 * The user application callback description.
 *
 * It contains the callback address to be registered by the user
 * application, the pointer to the callback parameters, and the event type.
 */
struct rte_eth_dev_callback {
	TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
	rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
	void *cb_arg;                           /**< Parameter for callback */
	void *ret_param;                        /**< Return parameter */
	enum rte_eth_event_type event;          /**< Interrupt event type */
	uint32_t active;                        /**< Callback is executing */
};

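/*
 * Usage sketch (illustrative, application side): entries of the list above
 * are created through the public rte_eth_dev_callback_register() API. The
 * callback name and the port_id variable below are placeholders:
 *
 *	static int
 *	link_event_cb(uint16_t port_id, enum rte_eth_event_type event,
 *		      void *cb_arg, void *ret_param)
 *	{
 *		RTE_SET_USED(cb_arg);
 *		RTE_SET_USED(ret_param);
 *		printf("port %u: event %d\n", port_id, (int)event);
 *		return 0;
 *	}
 *
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *				      link_event_cb, NULL);
 */
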
enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};

int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
	int ret;
	struct rte_devargs devargs = {.args = NULL};
	const char *bus_param_key;
	char *bus_str = NULL;
	char *cls_str = NULL;
	int str_size;

	memset(iter, 0, sizeof(*iter));

	/*
	 * The devargs string may use various syntaxes:
	 *   - 0000:08:00.0,representor=[1-3]
	 *   - pci:0000:06:00.0,representor=[0,5]
	 *   - class=eth,mac=00:11:22:33:44:55
	 * A new syntax is in development (not yet supported):
	 *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
	 */

	/*
	 * Handle a pure class filter (i.e. without any bus-level argument),
	 * from the future new syntax.
	 * rte_devargs_parse() does not support the new syntax yet,
	 * which is why this simple case is temporarily parsed here.
	 */
#define iter_anybus_str "class=eth,"
	if (strncmp(devargs_str, iter_anybus_str,
			strlen(iter_anybus_str)) == 0) {
		iter->cls_str = devargs_str + strlen(iter_anybus_str);
		goto end;
	}

	/* Split bus, device and parameters. */
	ret = rte_devargs_parse(&devargs, devargs_str);
	if (ret != 0)
		goto error;

	/*
	 * Assume parameters of the old syntax can match only at ethdev level.
	 * Extra parameters will be ignored, thanks to the "+" prefix.
	 */
	str_size = strlen(devargs.args) + 2;
	cls_str = malloc(str_size);
	if (cls_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(cls_str, str_size, "+%s", devargs.args);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->cls_str = cls_str;
	free(devargs.args); /* allocated by rte_devargs_parse() */
	devargs.args = NULL;

	iter->bus = devargs.bus;
	if (iter->bus->dev_iterate == NULL) {
		ret = -ENOTSUP;
		goto error;
	}

	/* Convert bus args to the new syntax for the new API dev_iterate. */
	if (strcmp(iter->bus->name, "vdev") == 0) {
		bus_param_key = "name";
	} else if (strcmp(iter->bus->name, "pci") == 0) {
		bus_param_key = "addr";
	} else {
		ret = -ENOTSUP;
		goto error;
	}
	str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
	bus_str = malloc(str_size);
	if (bus_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(bus_str, str_size, "%s=%s",
			bus_param_key, devargs.name);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->bus_str = bus_str;

end:
	iter->cls = rte_class_find_by_name("eth");
	return 0;

error:
	if (ret == -ENOTSUP)
		RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n",
				iter->bus->name);
	free(devargs.args);
	free(bus_str);
	free(cls_str);
	return ret;
}

uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
	if (iter->cls == NULL) /* invalid ethdev iterator */
		return RTE_MAX_ETHPORTS;

	do { /* loop to try all matching rte_device */
		/* If not a pure ethdev filter and */
		if (iter->bus != NULL &&
				/* not in the middle of rte_eth_dev iteration, */
				iter->class_device == NULL) {
			/* get the next rte_device to try. */
			iter->device = iter->bus->dev_iterate(
					iter->device, iter->bus_str, iter);
			if (iter->device == NULL)
				break; /* no more rte_device candidates */
		}
		/* The device matches the bus part; now check the ethdev part. */
		iter->class_device = iter->cls->dev_iterate(
				iter->class_device, iter->cls_str, iter);
		if (iter->class_device != NULL)
			return eth_dev_to_id(iter->class_device); /* match */
	} while (iter->bus != NULL); /* need to try the next rte_device */

	/* No more ethdev ports to iterate over. */
	rte_eth_iterator_cleanup(iter);
	return RTE_MAX_ETHPORTS;
}

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
	if (iter->bus_str == NULL)
		return; /* nothing to free in a pure class filter */
	free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
	free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
	memset(iter, 0, sizeof(*iter));
}

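/*
 * Usage sketch for the iterator API above (illustrative): the init/next
 * calls are normally driven by the RTE_ETH_FOREACH_MATCHING_DEV() helper
 * from rte_ethdev.h; rte_eth_iterator_cleanup() runs automatically when
 * rte_eth_iterator_next() exhausts the matches, and only needs an explicit
 * call when breaking out of the loop early. The MAC value is a placeholder:
 *
 *	struct rte_dev_iterator iterator;
 *	uint16_t port_id;
 *
 *	RTE_ETH_FOREACH_MATCHING_DEV(port_id,
 *			"class=eth,mac=00:11:22:33:44:55", &iterator)
 *		printf("matched port %u\n", port_id);
 */
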
uint16_t
rte_eth_find_next(uint16_t port_id)
{
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
		port_id++;

	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;

	return port_id;
}

/*
 * Macro to iterate over all valid ports for internal usage.
 * Note: RTE_ETH_FOREACH_DEV is different because it filters out owned ports.
 */
#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
	for (port_id = rte_eth_find_next(0); \
	     port_id < RTE_MAX_ETHPORTS; \
	     port_id = rte_eth_find_next(port_id + 1))

uint16_t
rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].device != parent)
		port_id = rte_eth_find_next(port_id + 1);

	return port_id;
}

uint16_t
rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
	return rte_eth_find_next_of(port_id,
			rte_eth_devices[ref_port_id].device);
}

static void
rte_eth_dev_shared_data_prepare(void)
{
	const unsigned int flags = 0;
	const struct rte_memzone *mz;

	rte_spinlock_lock(&rte_eth_shared_data_lock);

	if (rte_eth_dev_shared_data == NULL) {
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			/* Allocate port data and ownership shared memory. */
			mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
					sizeof(*rte_eth_dev_shared_data),
					rte_socket_id(), flags);
		} else
			mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
		if (mz == NULL)
			rte_panic("Cannot allocate ethdev shared data\n");

		rte_eth_dev_shared_data = mz->addr;
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			rte_eth_dev_shared_data->next_owner_id =
					RTE_ETH_DEV_NO_OWNER + 1;
			rte_spinlock_init(&rte_eth_dev_shared_data->ownership_lock);
			memset(rte_eth_dev_shared_data->data, 0,
			       sizeof(rte_eth_dev_shared_data->data));
		}
	}

	rte_spinlock_unlock(&rte_eth_shared_data_lock);
}

static bool
is_allocated(const struct rte_eth_dev *ethdev)
{
	return ethdev->data->name[0] != '\0';
}

static struct rte_eth_dev *
_rte_eth_dev_allocated(const char *name)
{
	unsigned int i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (rte_eth_devices[i].data != NULL &&
		    strcmp(rte_eth_devices[i].data->name, name) == 0)
			return &rte_eth_devices[i];
	}
	return NULL;
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
	struct rte_eth_dev *ethdev;

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	ethdev = _rte_eth_dev_allocated(name);

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

	return ethdev;
}

static uint16_t
rte_eth_dev_find_free_port(void)
{
	unsigned int i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		/* Use the shared name field to find a free port. */
		if (rte_eth_dev_shared_data->data[i].name[0] == '\0') {
			RTE_ASSERT(rte_eth_devices[i].state ==
				   RTE_ETH_DEV_UNUSED);
			return i;
		}
	}
	return RTE_MAX_ETHPORTS;
}

static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
{
	struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

	eth_dev->data = &rte_eth_dev_shared_data->data[port_id];

	return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
	uint16_t port_id;
	struct rte_eth_dev *eth_dev = NULL;
	size_t name_len;

	name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
	if (name_len == 0) {
		RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n");
		return NULL;
	}

	if (name_len >= RTE_ETH_NAME_MAX_LEN) {
		RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n");
		return NULL;
	}

	rte_eth_dev_shared_data_prepare();

	/* Synchronize port creation between primary and secondary processes. */
	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	if (_rte_eth_dev_allocated(name) != NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Ethernet device with name %s already allocated\n",
			name);
		goto unlock;
	}

	port_id = rte_eth_dev_find_free_port();
	if (port_id == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG(ERR,
			"Reached maximum number of Ethernet ports\n");
		goto unlock;
	}

	eth_dev = eth_dev_get(port_id);
	strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
	eth_dev->data->port_id = port_id;
	eth_dev->data->mtu = RTE_ETHER_MTU;
	pthread_mutex_init(&eth_dev->data->flow_ops_mutex, NULL);

unlock:
	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

	return eth_dev;
}

/*
 * Attach to a port already registered by the primary process, which
 * makes sure that the same device gets the same port id in both the
 * primary and secondary processes.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
	uint16_t i;
	struct rte_eth_dev *eth_dev = NULL;

	rte_eth_dev_shared_data_prepare();

	/* Synchronize port attachment with primary port creation and release. */
	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (strcmp(rte_eth_dev_shared_data->data[i].name, name) == 0)
			break;
	}
	if (i == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG(ERR,
			"Device %s is not driven by the primary process\n",
			name);
	} else {
		eth_dev = eth_dev_get(i);
		RTE_ASSERT(eth_dev->data->port_id == i);
	}

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
	return eth_dev;
}

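/*
 * Probe-time pattern (sketch): a PMD typically allocates the port in the
 * primary process and attaches to it in a secondary one, so that both
 * processes agree on the port id:
 *
 *	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
 *		eth_dev = rte_eth_dev_allocate(name);
 *	else
 *		eth_dev = rte_eth_dev_attach_secondary(name);
 *	if (eth_dev == NULL)
 *		return -ENODEV;
 */
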
int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
	if (eth_dev == NULL)
		return -EINVAL;

	rte_eth_dev_shared_data_prepare();

	if (eth_dev->state != RTE_ETH_DEV_UNUSED)
		rte_eth_dev_callback_process(eth_dev,
				RTE_ETH_EVENT_DESTROY, NULL);

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	eth_dev->state = RTE_ETH_DEV_UNUSED;
	eth_dev->device = NULL;
	eth_dev->intr_handle = NULL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_free(eth_dev->data->rx_queues);
		rte_free(eth_dev->data->tx_queues);
		rte_free(eth_dev->data->mac_addrs);
		rte_free(eth_dev->data->hash_mac_addrs);
		rte_free(eth_dev->data->dev_private);
		pthread_mutex_destroy(&eth_dev->data->flow_ops_mutex);
		memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
	}

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

	return 0;
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
	if (port_id >= RTE_MAX_ETHPORTS ||
	    (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
		return 0;
	else
		return 1;
}

static int
rte_eth_is_valid_owner_id(uint64_t owner_id)
{
	if (owner_id == RTE_ETH_DEV_NO_OWNER ||
	    rte_eth_dev_shared_data->next_owner_id <= owner_id)
		return 0;
	return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].data->owner.id != owner_id)
		port_id = rte_eth_find_next(port_id + 1);

	return port_id;
}

int
rte_eth_dev_owner_new(uint64_t *owner_id)
{
	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	*owner_id = rte_eth_dev_shared_data->next_owner_id++;

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
	return 0;
}

static int
_rte_eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
		       const struct rte_eth_dev_owner *new_owner)
{
	struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
	struct rte_eth_dev_owner *port_owner;

	if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (!rte_eth_is_valid_owner_id(new_owner->id) &&
	    !rte_eth_is_valid_owner_id(old_owner_id)) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
			old_owner_id, new_owner->id);
		return -EINVAL;
	}

	port_owner = &rte_eth_devices[port_id].data->owner;
	if (port_owner->id != old_owner_id) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
			port_id, port_owner->name, port_owner->id);
		return -EPERM;
	}

	/* cannot truncate (same structure) */
	strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);

	port_owner->id = new_owner->id;

	RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
		port_id, new_owner->name, new_owner->id);

	return 0;
}

int
rte_eth_dev_owner_set(const uint16_t port_id,
		      const struct rte_eth_dev_owner *owner)
{
	int ret;

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	ret = _rte_eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
	return ret;
}

int
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
	const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
			{.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
	int ret;

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	ret = _rte_eth_dev_owner_set(port_id, owner_id, &new_owner);

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
	return ret;
}

int
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
	uint16_t port_id;
	int ret = 0;

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	if (rte_eth_is_valid_owner_id(owner_id)) {
		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
			if (rte_eth_devices[port_id].data->owner.id == owner_id)
				memset(&rte_eth_devices[port_id].data->owner, 0,
				       sizeof(struct rte_eth_dev_owner));
		RTE_ETHDEV_LOG(NOTICE,
			"All ports owned by owner id %016"PRIx64" have been released\n",
			owner_id);
	} else {
		RTE_ETHDEV_LOG(ERR,
			       "Invalid owner id=%016"PRIx64"\n",
			       owner_id);
		ret = -EINVAL;
	}

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

	return ret;
}

int
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
	int ret = 0;
	struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
			port_id);
		ret = -ENODEV;
	} else {
		rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
	}

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
	return ret;
}

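/*
 * Ownership usage sketch (illustrative; the owner name "my_app" and the
 * port_id variable are placeholders): a component claims a port so that
 * the plain RTE_ETH_FOREACH_DEV() iteration of other components skips it:
 *
 *	struct rte_eth_dev_owner owner = { .name = "my_app" };
 *
 *	rte_eth_dev_owner_new(&owner.id);
 *	rte_eth_dev_owner_set(port_id, &owner);
 *	... the port is now reserved to this owner id ...
 *	rte_eth_dev_owner_unset(port_id, owner.id);
 */
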
int
rte_eth_dev_socket_id(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
	return rte_eth_devices[port_id].data->numa_node;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
	return rte_eth_devices[port_id].security_ctx;
}

uint16_t
rte_eth_dev_count_avail(void)
{
	uint16_t p;
	uint16_t count;

	count = 0;

	RTE_ETH_FOREACH_DEV(p)
		count++;

	return count;
}

uint16_t
rte_eth_dev_count_total(void)
{
	uint16_t port, count = 0;

	RTE_ETH_FOREACH_VALID_DEV(port)
		count++;

	return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
	char *tmp;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
		return -EINVAL;
	}

	/* shouldn't check 'rte_eth_devices[i].data',
	 * because it might be overwritten by a VDEV PMD */
	tmp = rte_eth_dev_shared_data->data[port_id].name;
	strcpy(name, tmp);
	return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
	uint32_t pid;

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
		return -EINVAL;
	}

	RTE_ETH_FOREACH_VALID_DEV(pid)
		if (!strcmp(name, rte_eth_dev_shared_data->data[pid].name)) {
			*port_id = pid;
			return 0;
		}

	return -ENODEV;
}

static int
eth_err(uint16_t port_id, int ret)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return -EIO;
	return ret;
}

static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_rx_queues;
	void **rxq;
	unsigned int i;

	if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
				sizeof(dev->data->rx_queues[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->rx_queues == NULL) {
			dev->data->nb_rx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

		rxq = dev->data->rx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->rx_queue_release)(rxq[i]);
		rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (rxq == NULL)
			return -(ENOMEM);
		if (nb_queues > old_nb_queues) {
			uint16_t new_qs = nb_queues - old_nb_queues;

			memset(rxq + old_nb_queues, 0,
				sizeof(rxq[0]) * new_qs);
		}

		dev->data->rx_queues = rxq;

	} else if (dev->data->rx_queues != NULL && nb_queues == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

		rxq = dev->data->rx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->rx_queue_release)(rxq[i]);

		rte_free(dev->data->rx_queues);
		dev->data->rx_queues = NULL;
	}
	dev->data->nb_rx_queues = nb_queues;
	return 0;
}

static int
eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	uint16_t port_id;

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Invalid Rx queue_id=%u of device with port_id=%u\n",
			       rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queues[rx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Queue %u of device with port_id=%u has not been set up\n",
			       rx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

static int
eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	uint16_t port_id;

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Invalid Tx queue_id=%u of device with port_id=%u\n",
			       tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queues[tx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Queue %u of device with port_id=%u has not been set up\n",
			       tx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_start(dev,
							     rx_queue_id));
}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
}

int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
}

static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_tx_queues;
	void **txq;
	unsigned int i;

	if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
						   sizeof(dev->data->tx_queues[0]) * nb_queues,
						   RTE_CACHE_LINE_SIZE);
		if (dev->data->tx_queues == NULL) {
			dev->data->nb_tx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

		txq = dev->data->tx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->tx_queue_release)(txq[i]);
		txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
				  RTE_CACHE_LINE_SIZE);
		if (txq == NULL)
			return -ENOMEM;
		if (nb_queues > old_nb_queues) {
			uint16_t new_qs = nb_queues - old_nb_queues;

			memset(txq + old_nb_queues, 0,
			       sizeof(txq[0]) * new_qs);
		}

		dev->data->tx_queues = txq;

	} else if (dev->data->tx_queues != NULL && nb_queues == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

		txq = dev->data->tx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->tx_queue_release)(txq[i]);

		rte_free(dev->data->tx_queues);
		dev->data->tx_queues = NULL;
	}
	dev->data->nb_tx_queues = nb_queues;
	return 0;
}

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
	switch (speed) {
	case ETH_SPEED_NUM_10M:
		return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
	case ETH_SPEED_NUM_100M:
		return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
	case ETH_SPEED_NUM_1G:
		return ETH_LINK_SPEED_1G;
	case ETH_SPEED_NUM_2_5G:
		return ETH_LINK_SPEED_2_5G;
	case ETH_SPEED_NUM_5G:
		return ETH_LINK_SPEED_5G;
	case ETH_SPEED_NUM_10G:
		return ETH_LINK_SPEED_10G;
	case ETH_SPEED_NUM_20G:
		return ETH_LINK_SPEED_20G;
	case ETH_SPEED_NUM_25G:
		return ETH_LINK_SPEED_25G;
	case ETH_SPEED_NUM_40G:
		return ETH_LINK_SPEED_40G;
	case ETH_SPEED_NUM_50G:
		return ETH_LINK_SPEED_50G;
	case ETH_SPEED_NUM_56G:
		return ETH_LINK_SPEED_56G;
	case ETH_SPEED_NUM_100G:
		return ETH_LINK_SPEED_100G;
	case ETH_SPEED_NUM_200G:
		return ETH_LINK_SPEED_200G;
	default:
		return 0;
	}
}

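/*
 * Example (sketch): building the link_speeds field of struct rte_eth_conf
 * from a numeric speed, here a fixed 10G full-duplex link:
 *
 *	conf.link_speeds = ETH_LINK_SPEED_FIXED |
 *			   rte_eth_speed_bitflag(ETH_SPEED_NUM_10G,
 *						 ETH_LINK_FULL_DUPLEX);
 */
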
const char *
rte_eth_dev_rx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(rte_rx_offload_names); ++i) {
		if (offload == rte_rx_offload_names[i].offload) {
			name = rte_rx_offload_names[i].name;
			break;
		}
	}

	return name;
}

const char *
rte_eth_dev_tx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(rte_tx_offload_names); ++i) {
		if (offload == rte_tx_offload_names[i].offload) {
			name = rte_tx_offload_names[i].name;
			break;
		}
	}

	return name;
}

static inline int
check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
		   uint32_t max_rx_pkt_len, uint32_t dev_info_size)
{
	int ret = 0;

	if (dev_info_size == 0) {
		if (config_size != max_rx_pkt_len) {
			RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
				       " %u != %u is not allowed\n",
				       port_id, config_size, max_rx_pkt_len);
			ret = -EINVAL;
		}
	} else if (config_size > dev_info_size) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			       "> max allowed value %u\n", port_id, config_size,
			       dev_info_size);
		ret = -EINVAL;
	} else if (config_size < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			       "< min allowed value %u\n", port_id, config_size,
			       (unsigned int)RTE_ETHER_MIN_LEN);
		ret = -EINVAL;
	}
	return ret;
}

/*
 * Validate offloads that are requested through rte_eth_dev_configure against
 * the offloads successfully set by the Ethernet device.
 *
 * @param port_id
 *   The port identifier of the Ethernet device.
 * @param req_offloads
 *   The offloads that have been requested through `rte_eth_dev_configure`.
 * @param set_offloads
 *   The offloads successfully set by the Ethernet device.
 * @param offload_type
 *   The offload type, i.e. the Rx/Tx string.
 * @param offload_name
 *   The function that prints the offload name.
 * @return
 *   - (0) if validation is successful.
 *   - (-EINVAL) if a requested offload has been silently disabled.
 */
static int
validate_offloads(uint16_t port_id, uint64_t req_offloads,
		  uint64_t set_offloads, const char *offload_type,
		  const char *(*offload_name)(uint64_t))
{
	uint64_t offloads_diff = req_offloads ^ set_offloads;
	uint64_t offload;
	int ret = 0;

	while (offloads_diff != 0) {
		/* Check if any offload is requested but not enabled. */
		offload = 1ULL << __builtin_ctzll(offloads_diff);
		if (offload & req_offloads) {
			RTE_ETHDEV_LOG(ERR,
				"Port %u failed to enable %s offload %s\n",
				port_id, offload_type, offload_name(offload));
			ret = -EINVAL;
		}

		/* Check if an offload could not be disabled. */
		if (offload & set_offloads) {
			RTE_ETHDEV_LOG(DEBUG,
				"Port %u %s offload %s is not requested but enabled\n",
				port_id, offload_type, offload_name(offload));
		}

		offloads_diff &= ~offload;
	}

	return ret;
}

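/*
 * Worked example for validate_offloads(): with req_offloads = 0x5 and
 * set_offloads = 0x4, offloads_diff = 0x5 ^ 0x4 = 0x1. Bit 0 is set in
 * req_offloads but not in set_offloads, so that offload was requested but
 * silently not enabled and the function returns -EINVAL. A bit set only
 * in set_offloads (enabled although not requested) is merely logged at
 * DEBUG level.
 */
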
int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf orig_conf;
	int diag;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be stopped to allow configuration\n",
			port_id);
		return -EBUSY;
	}

	/* Store the original config, as a rollback is required on failure. */
	memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));

	/*
	 * Copy the dev_conf parameter into the dev structure.
	 * rte_eth_dev_info_get() requires dev_conf, so copy it before the
	 * dev_info get call.
	 */
	if (dev_conf != &dev->data->dev_conf)
		memcpy(&dev->data->dev_conf, dev_conf,
		       sizeof(dev->data->dev_conf));

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		goto rollback;

	/* If the number of queues specified by the application for both Rx
	 * and Tx is zero, use driver preferred values. This cannot be done
	 * individually as it is valid for either Tx or Rx (but not both) to
	 * be zero.
	 * If the driver does not provide any preferred values, fall back on
	 * EAL defaults.
	 */
	if (nb_rx_q == 0 && nb_tx_q == 0) {
		nb_rx_q = dev_info.default_rxportconf.nb_queues;
		if (nb_rx_q == 0)
			nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
		nb_tx_q = dev_info.default_txportconf.nb_queues;
		if (nb_tx_q == 0)
			nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
	}

	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of Rx queues requested (%u) is greater than the max supported (%d)\n",
			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of Tx queues requested (%u) is greater than the max supported (%d)\n",
			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Check that the numbers of Rx and Tx queues are not greater
	 * than the maximum number of Rx and Tx queues supported by the
	 * configured device.
	 */
	if (nb_rx_q > dev_info.max_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
			port_id, nb_rx_q, dev_info.max_rx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
			port_id, nb_tx_q, dev_info.max_tx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check that the device supports the requested interrupts. */
	if ((dev_conf->intr_conf.lsc == 1) &&
			(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->intr_conf.rmv == 1) &&
			(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * If jumbo frames are enabled, check that the maximum Rx packet
	 * length is supported by the configured device.
	 */
	if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) {
			RTE_ETHDEV_LOG(ERR,
				"Ethdev port_id=%u max_rx_pkt_len %u > max valid value %u\n",
				port_id, dev_conf->rxmode.max_rx_pkt_len,
				dev_info.max_rx_pktlen);
			ret = -EINVAL;
			goto rollback;
		} else if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN) {
			RTE_ETHDEV_LOG(ERR,
				"Ethdev port_id=%u max_rx_pkt_len %u < min valid value %u\n",
				port_id, dev_conf->rxmode.max_rx_pkt_len,
				(unsigned int)RTE_ETHER_MIN_LEN);
			ret = -EINVAL;
			goto rollback;
		}
	} else {
		if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN ||
			dev_conf->rxmode.max_rx_pkt_len > RTE_ETHER_MAX_LEN)
			/* Use default value */
			dev->data->dev_conf.rxmode.max_rx_pkt_len =
							RTE_ETHER_MAX_LEN;
	}

	/*
	 * If LRO is enabled, check that the maximum aggregated packet
	 * size is supported by the configured device.
	 */
	if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
		if (dev_conf->rxmode.max_lro_pkt_size == 0)
			dev->data->dev_conf.rxmode.max_lro_pkt_size =
				dev->data->dev_conf.rxmode.max_rx_pkt_len;
		ret = check_lro_pkt_size(port_id,
				dev->data->dev_conf.rxmode.max_lro_pkt_size,
				dev->data->dev_conf.rxmode.max_rx_pkt_len,
				dev_info.max_lro_pkt_size);
		if (ret != 0)
			goto rollback;
	}

	/* Any requested offloading must be within its device capabilities */
	if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
	     dev_conf->rxmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" don't match Rx offloads "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, dev_conf->rxmode.offloads,
			dev_info.rx_offload_capa,
			__func__);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
	     dev_conf->txmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" don't match Tx offloads "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, dev_conf->txmode.offloads,
			dev_info.tx_offload_capa,
			__func__);
		ret = -EINVAL;
		goto rollback;
	}

	dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
		rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);

	/* Check that the device supports the requested RSS hash functions. */
	if ((dev_info.flow_type_rss_offloads |
	     dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
	    dev_info.flow_type_rss_offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
			port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
			dev_info.flow_type_rss_offloads);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
	if (((dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) == 0) &&
	    (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
			port_id,
			rte_eth_dev_rx_offload_name(DEV_RX_OFFLOAD_RSS_HASH));
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Set up the new number of Rx/Tx queues and reconfigure the device.
	 */
	diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u rte_eth_dev_rx_queue_config = %d\n",
			port_id, diag);
		ret = diag;
		goto rollback;
	}

	diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u rte_eth_dev_tx_queue_config = %d\n",
			port_id, diag);
		rte_eth_dev_rx_queue_config(dev, 0);
		ret = diag;
		goto rollback;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Initialize Rx profiling if enabled at compilation time. */
	diag = __rte_eth_dev_profile_init(port_id, dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Validate Rx offloads. */
	diag = validate_offloads(port_id,
			dev_conf->rxmode.offloads,
			dev->data->dev_conf.rxmode.offloads, "Rx",
			rte_eth_dev_rx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	/* Validate Tx offloads. */
	diag = validate_offloads(port_id,
			dev_conf->txmode.offloads,
			dev->data->dev_conf.txmode.offloads, "Tx",
			rte_eth_dev_tx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0);
	return 0;
reset_queues:
	rte_eth_dev_rx_queue_config(dev, 0);
	rte_eth_dev_tx_queue_config(dev, 0);
rollback:
	memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));

	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret);
	return ret;
}

1542 void
1543 rte_eth_dev_internal_reset(struct rte_eth_dev *dev)
1544 {
1545         if (dev->data->dev_started) {
1546                 RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
1547                         dev->data->port_id);
1548                 return;
1549         }
1550
1551         rte_eth_dev_rx_queue_config(dev, 0);
1552         rte_eth_dev_tx_queue_config(dev, 0);
1553
1554         memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
1555 }
1556
1557 static void
1558 rte_eth_dev_mac_restore(struct rte_eth_dev *dev,
1559                         struct rte_eth_dev_info *dev_info)
1560 {
1561         struct rte_ether_addr *addr;
1562         uint16_t i;
1563         uint32_t pool = 0;
1564         uint64_t pool_mask;
1565
1566         /* replay MAC address configuration including default MAC */
1567         addr = &dev->data->mac_addrs[0];
1568         if (*dev->dev_ops->mac_addr_set != NULL)
1569                 (*dev->dev_ops->mac_addr_set)(dev, addr);
1570         else if (*dev->dev_ops->mac_addr_add != NULL)
1571                 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);
1572
1573         if (*dev->dev_ops->mac_addr_add != NULL) {
1574                 for (i = 1; i < dev_info->max_mac_addrs; i++) {
1575                         addr = &dev->data->mac_addrs[i];
1576
1577                         /* skip zero address */
1578                         if (rte_is_zero_ether_addr(addr))
1579                                 continue;
1580
1581                         pool = 0;
1582                         pool_mask = dev->data->mac_pool_sel[i];
1583
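                        /*
                         * mac_pool_sel[i] is a bitmap of the VMDq pools
                         * this address belongs to; replay the add on each
                         * pool whose bit is set.
                         */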
1584                         do {
1585                                 if (pool_mask & 1ULL)
1586                                         (*dev->dev_ops->mac_addr_add)(dev,
1587                                                 addr, i, pool);
1588                                 pool_mask >>= 1;
1589                                 pool++;
1590                         } while (pool_mask);
1591                 }
1592         }
1593 }
1594
1595 static int
1596 rte_eth_dev_config_restore(struct rte_eth_dev *dev,
1597                            struct rte_eth_dev_info *dev_info, uint16_t port_id)
1598 {
1599         int ret;
1600
1601         if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
1602                 rte_eth_dev_mac_restore(dev, dev_info);
1603
1604         /* replay promiscuous configuration */
1605         /*
1606          * Use the driver callbacks directly: port_id is already valid
1607          * here, and the replay must run even if the value is unchanged.
1608          */
1609         if (rte_eth_promiscuous_get(port_id) == 1 &&
1610             *dev->dev_ops->promiscuous_enable != NULL) {
1611                 ret = eth_err(port_id,
1612                               (*dev->dev_ops->promiscuous_enable)(dev));
1613                 if (ret != 0 && ret != -ENOTSUP) {
1614                         RTE_ETHDEV_LOG(ERR,
1615                                 "Failed to enable promiscuous mode for device (port %u): %s\n",
1616                                 port_id, rte_strerror(-ret));
1617                         return ret;
1618                 }
1619         } else if (rte_eth_promiscuous_get(port_id) == 0 &&
1620                    *dev->dev_ops->promiscuous_disable != NULL) {
1621                 ret = eth_err(port_id,
1622                               (*dev->dev_ops->promiscuous_disable)(dev));
1623                 if (ret != 0 && ret != -ENOTSUP) {
1624                         RTE_ETHDEV_LOG(ERR,
1625                                 "Failed to disable promiscuous mode for device (port %u): %s\n",
1626                                 port_id, rte_strerror(-ret));
1627                         return ret;
1628                 }
1629         }
1630
1631         /* replay all multicast configuration */
1632         /*
1633          * Use the driver callbacks directly: port_id is already valid
1634          * here, and the replay must run even if the value is unchanged.
1635          */
1636         if (rte_eth_allmulticast_get(port_id) == 1 &&
1637             *dev->dev_ops->allmulticast_enable != NULL) {
1638                 ret = eth_err(port_id,
1639                               (*dev->dev_ops->allmulticast_enable)(dev));
1640                 if (ret != 0 && ret != -ENOTSUP) {
1641                         RTE_ETHDEV_LOG(ERR,
1642                                 "Failed to enable allmulticast mode for device (port %u): %s\n",
1643                                 port_id, rte_strerror(-ret));
1644                         return ret;
1645                 }
1646         } else if (rte_eth_allmulticast_get(port_id) == 0 &&
1647                    *dev->dev_ops->allmulticast_disable != NULL) {
1648                 ret = eth_err(port_id,
1649                               (*dev->dev_ops->allmulticast_disable)(dev));
1650                 if (ret != 0 && ret != -ENOTSUP) {
1651                         RTE_ETHDEV_LOG(ERR,
1652                                 "Failed to disable allmulticast mode for device (port %u): %s\n",
1653                                 port_id, rte_strerror(-ret));
1654                         return ret;
1655                 }
1656         }
1657
1658         return 0;
1659 }
1660
1661 int
1662 rte_eth_dev_start(uint16_t port_id)
1663 {
1664         struct rte_eth_dev *dev;
1665         struct rte_eth_dev_info dev_info;
1666         int diag;
1667         int ret;
1668
1669         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1670
1671         dev = &rte_eth_devices[port_id];
1672
1673         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1674
1675         if (dev->data->dev_started != 0) {
1676                 RTE_ETHDEV_LOG(INFO,
1677                         "Device with port_id=%"PRIu16" already started\n",
1678                         port_id);
1679                 return 0;
1680         }
1681
1682         ret = rte_eth_dev_info_get(port_id, &dev_info);
1683         if (ret != 0)
1684                 return ret;
1685
1686         /* Let's restore the MAC address now if the device does not support live change */
1687         if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
1688                 rte_eth_dev_mac_restore(dev, &dev_info);
1689
1690         diag = (*dev->dev_ops->dev_start)(dev);
1691         if (diag == 0)
1692                 dev->data->dev_started = 1;
1693         else
1694                 return eth_err(port_id, diag);
1695
1696         ret = rte_eth_dev_config_restore(dev, &dev_info, port_id);
1697         if (ret != 0) {
1698                 RTE_ETHDEV_LOG(ERR,
1699                         "Error during restoring configuration for device (port %u): %s\n",
1700                         port_id, rte_strerror(-ret));
1701                 rte_eth_dev_stop(port_id);
1702                 return ret;
1703         }
1704
1705         if (dev->data->dev_conf.intr_conf.lsc == 0) {
1706                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
1707                 (*dev->dev_ops->link_update)(dev, 0);
1708         }
1709
1710         rte_ethdev_trace_start(port_id);
1711         return 0;
1712 }
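
/*
 * Typical bring-up sequence, as an illustrative sketch (not part of
 * this file): "mb_pool" is assumed to be a mempool created with
 * rte_pktmbuf_pool_create(), and error handling is omitted.
 *
 *	struct rte_eth_conf port_conf = { 0 };
 *
 *	rte_eth_dev_configure(port_id, 1, 1, &port_conf);
 *	rte_eth_rx_queue_setup(port_id, 0, 1024, rte_socket_id(),
 *			       NULL, mb_pool);
 *	rte_eth_tx_queue_setup(port_id, 0, 1024, rte_socket_id(), NULL);
 *	rte_eth_dev_start(port_id);
 */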
1713
1714 void
1715 rte_eth_dev_stop(uint16_t port_id)
1716 {
1717         struct rte_eth_dev *dev;
1718
1719         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1720         dev = &rte_eth_devices[port_id];
1721
1722         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1723
1724         if (dev->data->dev_started == 0) {
1725                 RTE_ETHDEV_LOG(INFO,
1726                         "Device with port_id=%"PRIu16" already stopped\n",
1727                         port_id);
1728                 return;
1729         }
1730
1731         dev->data->dev_started = 0;
1732         (*dev->dev_ops->dev_stop)(dev);
1733         rte_ethdev_trace_stop(port_id);
1734 }
1735
1736 int
1737 rte_eth_dev_set_link_up(uint16_t port_id)
1738 {
1739         struct rte_eth_dev *dev;
1740
1741         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1742
1743         dev = &rte_eth_devices[port_id];
1744
1745         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1746         return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
1747 }
1748
1749 int
1750 rte_eth_dev_set_link_down(uint16_t port_id)
1751 {
1752         struct rte_eth_dev *dev;
1753
1754         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1755
1756         dev = &rte_eth_devices[port_id];
1757
1758         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1759         return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
1760 }
1761
1762 void
1763 rte_eth_dev_close(uint16_t port_id)
1764 {
1765         struct rte_eth_dev *dev;
1766
1767         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1768         dev = &rte_eth_devices[port_id];
1769
1770         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
1771         dev->data->dev_started = 0;
1772         (*dev->dev_ops->dev_close)(dev);
1773
1774         rte_ethdev_trace_close(port_id);
1775         rte_eth_dev_release_port(dev);
1776 }
1777
1778 int
1779 rte_eth_dev_reset(uint16_t port_id)
1780 {
1781         struct rte_eth_dev *dev;
1782         int ret;
1783
1784         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1785         dev = &rte_eth_devices[port_id];
1786
1787         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
1788
1789         rte_eth_dev_stop(port_id);
1790         ret = dev->dev_ops->dev_reset(dev);
1791
1792         return eth_err(port_id, ret);
1793 }
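
/*
 * Illustrative recovery sketch (not part of this file): on a
 * RTE_ETH_EVENT_INTR_RESET event the application is expected to reset
 * the port and then rebuild its previous configuration, e.g.:
 *
 *	rte_eth_dev_reset(port_id);
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
 *	// ...Rx/Tx queue setup as during the initial bring-up...
 *	rte_eth_dev_start(port_id);
 */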
1794
1795 int
1796 rte_eth_dev_is_removed(uint16_t port_id)
1797 {
1798         struct rte_eth_dev *dev;
1799         int ret;
1800
1801         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
1802
1803         dev = &rte_eth_devices[port_id];
1804
1805         if (dev->state == RTE_ETH_DEV_REMOVED)
1806                 return 1;
1807
1808         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);
1809
1810         ret = dev->dev_ops->is_removed(dev);
1811         if (ret != 0)
1812                 /* Device is physically removed. */
1813                 dev->state = RTE_ETH_DEV_REMOVED;
1814
1815         return ret;
1816 }
1817
1818 int
1819 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1820                        uint16_t nb_rx_desc, unsigned int socket_id,
1821                        const struct rte_eth_rxconf *rx_conf,
1822                        struct rte_mempool *mp)
1823 {
1824         int ret;
1825         uint32_t mbp_buf_size;
1826         struct rte_eth_dev *dev;
1827         struct rte_eth_dev_info dev_info;
1828         struct rte_eth_rxconf local_conf;
1829         void **rxq;
1830
1831         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1832
1833         dev = &rte_eth_devices[port_id];
1834         if (rx_queue_id >= dev->data->nb_rx_queues) {
1835                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
1836                 return -EINVAL;
1837         }
1838
1839         if (mp == NULL) {
1840                 RTE_ETHDEV_LOG(ERR, "Invalid null mempool pointer\n");
1841                 return -EINVAL;
1842         }
1843
1844         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
1845
1846         /*
1847          * Check the size of the mbuf data buffer.
1848          * This value must be provided in the private data of the memory pool.
1849          * First check that the memory pool has a valid private data.
1850          */
1851         ret = rte_eth_dev_info_get(port_id, &dev_info);
1852         if (ret != 0)
1853                 return ret;
1854
1855         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
1856                 RTE_ETHDEV_LOG(ERR, "%s private_data_size %d < %d\n",
1857                         mp->name, (int)mp->private_data_size,
1858                         (int)sizeof(struct rte_pktmbuf_pool_private));
1859                 return -ENOSPC;
1860         }
1861         mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1862
1863         if (mbp_buf_size < dev_info.min_rx_bufsize + RTE_PKTMBUF_HEADROOM) {
1864                 RTE_ETHDEV_LOG(ERR,
1865                         "%s mbuf_data_room_size %d < %d (RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)=%d)\n",
1866                         mp->name, (int)mbp_buf_size,
1867                         (int)(RTE_PKTMBUF_HEADROOM + dev_info.min_rx_bufsize),
1868                         (int)RTE_PKTMBUF_HEADROOM,
1869                         (int)dev_info.min_rx_bufsize);
1870                 return -EINVAL;
1871         }
1872
1873         /* Use the default specified by the driver if nb_rx_desc is zero */
1874         if (nb_rx_desc == 0) {
1875                 nb_rx_desc = dev_info.default_rxportconf.ring_size;
1876                 /* If driver default is also zero, fall back on EAL default */
1877                 if (nb_rx_desc == 0)
1878                         nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
1879         }
1880
1881         if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
1882                         nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
1883                         nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
1885                 RTE_ETHDEV_LOG(ERR,
1886                         "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
1887                         nb_rx_desc, dev_info.rx_desc_lim.nb_max,
1888                         dev_info.rx_desc_lim.nb_min,
1889                         dev_info.rx_desc_lim.nb_align);
1890                 return -EINVAL;
1891         }
1892
1893         if (dev->data->dev_started &&
1894                 !(dev_info.dev_capa &
1895                         RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
1896                 return -EBUSY;
1897
1898         if (dev->data->dev_started &&
1899                 (dev->data->rx_queue_state[rx_queue_id] !=
1900                         RTE_ETH_QUEUE_STATE_STOPPED))
1901                 return -EBUSY;
1902
1903         rxq = dev->data->rx_queues;
1904         if (rxq[rx_queue_id]) {
1905                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
1906                                         -ENOTSUP);
1907                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
1908                 rxq[rx_queue_id] = NULL;
1909         }
1910
1911         if (rx_conf == NULL)
1912                 rx_conf = &dev_info.default_rxconf;
1913
1914         local_conf = *rx_conf;
1915
1916         /*
1917          * If an offload has already been enabled in
1918          * rte_eth_dev_configure(), it has been enabled on all queues,
1919          * so there is no need to enable it on this queue again.
1920          * The local_conf.offloads passed to the underlying PMD only
1921          * carries those offloads that are enabled on this queue alone
1922          * and not on all queues.
1923          */
1924         local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
1925
1926         /*
1927          * The offloads newly added for this queue are those not enabled
1928          * in rte_eth_dev_configure(), and they must be of the per-queue
1929          * type. A pure per-port offload can't be enabled on one queue
1930          * while disabled on another queue, nor can it be newly added
1931          * on any queue unless it has already been
1932          * enabled in rte_eth_dev_configure().
1933          */
1934         if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
1935              local_conf.offloads) {
1936                 RTE_ETHDEV_LOG(ERR,
1937                         "Ethdev port_id=%d rx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
1938                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
1939                         port_id, rx_queue_id, local_conf.offloads,
1940                         dev_info.rx_queue_offload_capa,
1941                         __func__);
1942                 return -EINVAL;
1943         }
1944
1945         /*
1946          * If LRO is enabled, check that the maximum aggregated packet
1947          * size is supported by the configured device.
1948          */
1949         if (local_conf.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
1950                 if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0)
1951                         dev->data->dev_conf.rxmode.max_lro_pkt_size =
1952                                 dev->data->dev_conf.rxmode.max_rx_pkt_len;
1953                 ret = check_lro_pkt_size(port_id,
1954                                 dev->data->dev_conf.rxmode.max_lro_pkt_size,
1955                                 dev->data->dev_conf.rxmode.max_rx_pkt_len,
1956                                 dev_info.max_lro_pkt_size);
1957                 if (ret != 0)
1958                         return ret;
1959         }
1960
1961         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
1962                                               socket_id, &local_conf, mp);
1963         if (!ret) {
1964                 if (!dev->data->min_rx_buf_size ||
1965                     dev->data->min_rx_buf_size > mbp_buf_size)
1966                         dev->data->min_rx_buf_size = mbp_buf_size;
1967         }
1968
1969         rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp,
1970                 rx_conf, ret);
1971         return eth_err(port_id, ret);
1972 }
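
/*
 * Illustrative sketch (not part of this file): enabling an offload on
 * one queue only, on top of the port-wide rxmode.offloads. It assumes
 * the PMD reports the flag in rx_queue_offload_capa, and that dev_info
 * was filled by rte_eth_dev_info_get(); "mb_pool" is a placeholder.
 *
 *	struct rte_eth_rxconf rxconf = dev_info.default_rxconf;
 *
 *	rxconf.offloads |= DEV_RX_OFFLOAD_SCATTER;
 *	rte_eth_rx_queue_setup(port_id, 0, 1024, rte_socket_id(),
 *			       &rxconf, mb_pool);
 */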
1973
1974 int
1975 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1976                                uint16_t nb_rx_desc,
1977                                const struct rte_eth_hairpin_conf *conf)
1978 {
1979         int ret;
1980         struct rte_eth_dev *dev;
1981         struct rte_eth_hairpin_cap cap;
1982         void **rxq;
1983         int i;
1984         int count;
1985
1986         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1987
1988         dev = &rte_eth_devices[port_id];
1989         if (rx_queue_id >= dev->data->nb_rx_queues) {
1990                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
1991                 return -EINVAL;
1992         }
1993         ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
1994         if (ret != 0)
1995                 return ret;
1996         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_hairpin_queue_setup,
1997                                 -ENOTSUP);
1998         /* if nb_rx_desc is zero use max number of desc from the driver. */
1999         if (nb_rx_desc == 0)
2000                 nb_rx_desc = cap.max_nb_desc;
2001         if (nb_rx_desc > cap.max_nb_desc) {
2002                 RTE_ETHDEV_LOG(ERR,
2003                         "Invalid value for nb_rx_desc(=%hu), should be: <= %hu\n",
2004                         nb_rx_desc, cap.max_nb_desc);
2005                 return -EINVAL;
2006         }
2007         if (conf->peer_count > cap.max_rx_2_tx) {
2008                 RTE_ETHDEV_LOG(ERR,
2009                         "Invalid value for number of peers for Rx queue(=%hu), should be: <= %hu\n",
2010                         conf->peer_count, cap.max_rx_2_tx);
2011                 return -EINVAL;
2012         }
2013         if (conf->peer_count == 0) {
2014                 RTE_ETHDEV_LOG(ERR,
2015                         "Invalid value for number of peers for Rx queue(=%hu), should be: > 0\n",
2016                         conf->peer_count);
2017                 return -EINVAL;
2018         }
2019         for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
2020              cap.max_nb_queues != UINT16_MAX; i++) {
2021                 if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
2022                         count++;
2023         }
2024         if (count > cap.max_nb_queues) {
2025                 RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues, max is %d\n",
2026                                cap.max_nb_queues);
2027                 return -EINVAL;
2028         }
2029         if (dev->data->dev_started)
2030                 return -EBUSY;
2031         rxq = dev->data->rx_queues;
2032         if (rxq[rx_queue_id] != NULL) {
2033                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
2034                                         -ENOTSUP);
2035                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
2036                 rxq[rx_queue_id] = NULL;
2037         }
2038         ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
2039                                                       nb_rx_desc, conf);
2040         if (ret == 0)
2041                 dev->data->rx_queue_state[rx_queue_id] =
2042                         RTE_ETH_QUEUE_STATE_HAIRPIN;
2043         return eth_err(port_id, ret);
2044 }
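
/*
 * Illustrative sketch (not part of this file): binding Rx hairpin
 * queue "rx_q" to Tx queue "tx_q" of port "peer_port". All three names
 * are placeholders; both ports are assumed to have been configured
 * with the extra hairpin queue counted in nb_rx_q/nb_tx_q.
 *
 *	struct rte_eth_hairpin_conf hp_conf = {
 *		.peer_count = 1,
 *		.peers[0] = { .port = peer_port, .queue = tx_q },
 *	};
 *
 *	rte_eth_rx_hairpin_queue_setup(port_id, rx_q, 0, &hp_conf);
 */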
2045
2046 int
2047 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2048                        uint16_t nb_tx_desc, unsigned int socket_id,
2049                        const struct rte_eth_txconf *tx_conf)
2050 {
2051         struct rte_eth_dev *dev;
2052         struct rte_eth_dev_info dev_info;
2053         struct rte_eth_txconf local_conf;
2054         void **txq;
2055         int ret;
2056
2057         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2058
2059         dev = &rte_eth_devices[port_id];
2060         if (tx_queue_id >= dev->data->nb_tx_queues) {
2061                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
2062                 return -EINVAL;
2063         }
2064
2065         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
2066
2067         ret = rte_eth_dev_info_get(port_id, &dev_info);
2068         if (ret != 0)
2069                 return ret;
2070
2071         /* Use the default specified by the driver if nb_tx_desc is zero */
2072         if (nb_tx_desc == 0) {
2073                 nb_tx_desc = dev_info.default_txportconf.ring_size;
2074                 /* If driver default is zero, fall back on EAL default */
2075                 if (nb_tx_desc == 0)
2076                         nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
2077         }
2078         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
2079             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
2080             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
2081                 RTE_ETHDEV_LOG(ERR,
2082                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
2083                         nb_tx_desc, dev_info.tx_desc_lim.nb_max,
2084                         dev_info.tx_desc_lim.nb_min,
2085                         dev_info.tx_desc_lim.nb_align);
2086                 return -EINVAL;
2087         }
2088
2089         if (dev->data->dev_started &&
2090                 !(dev_info.dev_capa &
2091                         RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
2092                 return -EBUSY;
2093
2094         if (dev->data->dev_started &&
2095                 (dev->data->tx_queue_state[tx_queue_id] !=
2096                         RTE_ETH_QUEUE_STATE_STOPPED))
2097                 return -EBUSY;
2098
2099         txq = dev->data->tx_queues;
2100         if (txq[tx_queue_id]) {
2101                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
2102                                         -ENOTSUP);
2103                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
2104                 txq[tx_queue_id] = NULL;
2105         }
2106
2107         if (tx_conf == NULL)
2108                 tx_conf = &dev_info.default_txconf;
2109
2110         local_conf = *tx_conf;
2111
2112         /*
2113          * If an offload has already been enabled in
2114          * rte_eth_dev_configure(), it has been enabled on all queues,
2115          * so there is no need to enable it on this queue again.
2116          * The local_conf.offloads passed to the underlying PMD only
2117          * carries those offloads that are enabled on this queue alone
2118          * and not on all queues.
2119          */
2120         local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
2121
2122         /*
2123          * The offloads newly added for this queue are those not enabled
2124          * in rte_eth_dev_configure(), and they must be of the per-queue
2125          * type. A pure per-port offload can't be enabled on one queue
2126          * while disabled on another queue, nor can it be newly added
2127          * on any queue unless it has already been
2128          * enabled in rte_eth_dev_configure().
2129          */
2130         if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
2131              local_conf.offloads) {
2132                 RTE_ETHDEV_LOG(ERR,
2133                         "Ethdev port_id=%d tx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
2134                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2135                         port_id, tx_queue_id, local_conf.offloads,
2136                         dev_info.tx_queue_offload_capa,
2137                         __func__);
2138                 return -EINVAL;
2139         }
2140
2141         rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf);
2142         return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
2143                        tx_queue_id, nb_tx_desc, socket_id, &local_conf));
2144 }
2145
2146 int
2147 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2148                                uint16_t nb_tx_desc,
2149                                const struct rte_eth_hairpin_conf *conf)
2150 {
2151         struct rte_eth_dev *dev;
2152         struct rte_eth_hairpin_cap cap;
2153         void **txq;
2154         int i;
2155         int count;
2156         int ret;
2157
2158         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2159         dev = &rte_eth_devices[port_id];
2160         if (tx_queue_id >= dev->data->nb_tx_queues) {
2161                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
2162                 return -EINVAL;
2163         }
2164         ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2165         if (ret != 0)
2166                 return ret;
2167         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup,
2168                                 -ENOTSUP);
2169         /* if nb_tx_desc is zero use max number of desc from the driver. */
2170         if (nb_tx_desc == 0)
2171                 nb_tx_desc = cap.max_nb_desc;
2172         if (nb_tx_desc > cap.max_nb_desc) {
2173                 RTE_ETHDEV_LOG(ERR,
2174                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu\n",
2175                         nb_tx_desc, cap.max_nb_desc);
2176                 return -EINVAL;
2177         }
2178         if (conf->peer_count > cap.max_tx_2_rx) {
2179                 RTE_ETHDEV_LOG(ERR,
2180                         "Invalid value for number of peers for Tx queue(=%hu), should be: <= %hu\n",
2181                         conf->peer_count, cap.max_tx_2_rx);
2182                 return -EINVAL;
2183         }
2184         if (conf->peer_count == 0) {
2185                 RTE_ETHDEV_LOG(ERR,
2186                         "Invalid value for number of peers for Tx queue(=%hu), should be: > 0\n",
2187                         conf->peer_count);
2188                 return -EINVAL;
2189         }
2190         for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
2191              cap.max_nb_queues != UINT16_MAX; i++) {
2192                 if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
2193                         count++;
2194         }
2195         if (count > cap.max_nb_queues) {
2196                 RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues, max is %d\n",
2197                                cap.max_nb_queues);
2198                 return -EINVAL;
2199         }
2200         if (dev->data->dev_started)
2201                 return -EBUSY;
2202         txq = dev->data->tx_queues;
2203         if (txq[tx_queue_id] != NULL) {
2204                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
2205                                         -ENOTSUP);
2206                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
2207                 txq[tx_queue_id] = NULL;
2208         }
2209         ret = (*dev->dev_ops->tx_hairpin_queue_setup)
2210                 (dev, tx_queue_id, nb_tx_desc, conf);
2211         if (ret == 0)
2212                 dev->data->tx_queue_state[tx_queue_id] =
2213                         RTE_ETH_QUEUE_STATE_HAIRPIN;
2214         return eth_err(port_id, ret);
2215 }
2216
2217 void
2218 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
2219                 void *userdata __rte_unused)
2220 {
2221         rte_pktmbuf_free_bulk(pkts, unsent);
2222 }
2223
2224 void
2225 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
2226                 void *userdata)
2227 {
2228         uint64_t *count = userdata;
2229
2230         rte_pktmbuf_free_bulk(pkts, unsent);
2231         *count += unsent;
2232 }
2233
2234 int
2235 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
2236                 buffer_tx_error_fn cbfn, void *userdata)
2237 {
2238         buffer->error_callback = cbfn;
2239         buffer->error_userdata = userdata;
2240         return 0;
2241 }
2242
2243 int
2244 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
2245 {
2246         int ret = 0;
2247
2248         if (buffer == NULL)
2249                 return -EINVAL;
2250
2251         buffer->size = size;
2252         if (buffer->error_callback == NULL) {
2253                 ret = rte_eth_tx_buffer_set_err_callback(
2254                         buffer, rte_eth_tx_buffer_drop_callback, NULL);
2255         }
2256
2257         return ret;
2258 }
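
/*
 * Illustrative sketch (not part of this file): buffering packets for
 * Tx queue 0 with a drop counter. "buffer" is assumed to point to
 * RTE_ETH_TX_BUFFER_SIZE(32) bytes of zeroed memory.
 *
 *	static uint64_t drop_count;
 *
 *	rte_eth_tx_buffer_init(buffer, 32);
 *	rte_eth_tx_buffer_set_err_callback(buffer,
 *			rte_eth_tx_buffer_count_callback, &drop_count);
 *	rte_eth_tx_buffer(port_id, 0, buffer, mbuf);
 *	rte_eth_tx_buffer_flush(port_id, 0, buffer);
 */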
2259
2260 int
2261 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
2262 {
2263         struct rte_eth_dev *dev;
2264         int ret;
2265         /* Validate input data; bail out if not valid or not supported. */
2266         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2267         dev = &rte_eth_devices[port_id];
2268         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
2269
2270         /* Call driver to free pending mbufs. */
2271         ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
2272                                                free_cnt);
2273         return eth_err(port_id, ret);
2274 }
2275
2276 int
2277 rte_eth_promiscuous_enable(uint16_t port_id)
2278 {
2279         struct rte_eth_dev *dev;
2280         int diag = 0;
2281
2282         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2283         dev = &rte_eth_devices[port_id];
2284
2285         if (dev->data->promiscuous == 1)
2286                 return 0;
2287
2288         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP);
2289
2290         diag = (*dev->dev_ops->promiscuous_enable)(dev);
2291         dev->data->promiscuous = (diag == 0) ? 1 : 0;
2292
2293         return eth_err(port_id, diag);
2294 }
2295
2296 int
2297 rte_eth_promiscuous_disable(uint16_t port_id)
2298 {
2299         struct rte_eth_dev *dev;
2300         int diag = 0;
2301
2302         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2303         dev = &rte_eth_devices[port_id];
2304
2305         if (dev->data->promiscuous == 0)
2306                 return 0;
2307
2308         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP);
2309
2310         dev->data->promiscuous = 0;
2311         diag = (*dev->dev_ops->promiscuous_disable)(dev);
2312         if (diag != 0)
2313                 dev->data->promiscuous = 1;
2314
2315         return eth_err(port_id, diag);
2316 }
2317
2318 int
2319 rte_eth_promiscuous_get(uint16_t port_id)
2320 {
2321         struct rte_eth_dev *dev;
2322
2323         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2324
2325         dev = &rte_eth_devices[port_id];
2326         return dev->data->promiscuous;
2327 }
2328
2329 int
2330 rte_eth_allmulticast_enable(uint16_t port_id)
2331 {
2332         struct rte_eth_dev *dev;
2333         int diag;
2334
2335         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2336         dev = &rte_eth_devices[port_id];
2337
2338         if (dev->data->all_multicast == 1)
2339                 return 0;
2340
2341         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP);
2342         diag = (*dev->dev_ops->allmulticast_enable)(dev);
2343         dev->data->all_multicast = (diag == 0) ? 1 : 0;
2344
2345         return eth_err(port_id, diag);
2346 }
2347
2348 int
2349 rte_eth_allmulticast_disable(uint16_t port_id)
2350 {
2351         struct rte_eth_dev *dev;
2352         int diag;
2353
2354         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2355         dev = &rte_eth_devices[port_id];
2356
2357         if (dev->data->all_multicast == 0)
2358                 return 0;
2359
2360         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP);
2361         dev->data->all_multicast = 0;
2362         diag = (*dev->dev_ops->allmulticast_disable)(dev);
2363         if (diag != 0)
2364                 dev->data->all_multicast = 1;
2365
2366         return eth_err(port_id, diag);
2367 }
2368
2369 int
2370 rte_eth_allmulticast_get(uint16_t port_id)
2371 {
2372         struct rte_eth_dev *dev;
2373
2374         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2375
2376         dev = &rte_eth_devices[port_id];
2377         return dev->data->all_multicast;
2378 }
2379
2380 int
2381 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
2382 {
2383         struct rte_eth_dev *dev;
2384
2385         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2386         dev = &rte_eth_devices[port_id];
2387
2388         if (dev->data->dev_conf.intr_conf.lsc &&
2389             dev->data->dev_started)
2390                 rte_eth_linkstatus_get(dev, eth_link);
2391         else {
2392                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2393                 (*dev->dev_ops->link_update)(dev, 1);
2394                 *eth_link = dev->data->dev_link;
2395         }
2396
2397         return 0;
2398 }
2399
2400 int
2401 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
2402 {
2403         struct rte_eth_dev *dev;
2404
2405         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2406         dev = &rte_eth_devices[port_id];
2407
2408         if (dev->data->dev_conf.intr_conf.lsc &&
2409             dev->data->dev_started)
2410                 rte_eth_linkstatus_get(dev, eth_link);
2411         else {
2412                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2413                 (*dev->dev_ops->link_update)(dev, 0);
2414                 *eth_link = dev->data->dev_link;
2415         }
2416
2417         return 0;
2418 }
2419
2420 const char *
2421 rte_eth_link_speed_to_str(uint32_t link_speed)
2422 {
2423         switch (link_speed) {
2424         case ETH_SPEED_NUM_NONE: return "None";
2425         case ETH_SPEED_NUM_10M:  return "10 Mbps";
2426         case ETH_SPEED_NUM_100M: return "100 Mbps";
2427         case ETH_SPEED_NUM_1G:   return "1 Gbps";
2428         case ETH_SPEED_NUM_2_5G: return "2.5 Gbps";
2429         case ETH_SPEED_NUM_5G:   return "5 Gbps";
2430         case ETH_SPEED_NUM_10G:  return "10 Gbps";
2431         case ETH_SPEED_NUM_20G:  return "20 Gbps";
2432         case ETH_SPEED_NUM_25G:  return "25 Gbps";
2433         case ETH_SPEED_NUM_40G:  return "40 Gbps";
2434         case ETH_SPEED_NUM_50G:  return "50 Gbps";
2435         case ETH_SPEED_NUM_56G:  return "56 Gbps";
2436         case ETH_SPEED_NUM_100G: return "100 Gbps";
2437         case ETH_SPEED_NUM_200G: return "200 Gbps";
2438         case ETH_SPEED_NUM_UNKNOWN: return "Unknown";
2439         default: return "Invalid";
2440         }
2441 }
2442
2443 int
2444 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
2445 {
2446         if (eth_link->link_status == ETH_LINK_DOWN)
2447                 return snprintf(str, len, "Link down");
2448         else
2449                 return snprintf(str, len, "Link up at %s %s %s",
2450                         rte_eth_link_speed_to_str(eth_link->link_speed),
2451                         (eth_link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
2452                         "FDX" : "HDX",
2453                         (eth_link->link_autoneg == ETH_LINK_AUTONEG) ?
2454                         "Autoneg" : "Fixed");
2455 }
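
/*
 * Illustrative sketch (not part of this file): printing the current
 * link status of a port.
 *
 *	struct rte_eth_link link;
 *	char link_str[RTE_ETH_LINK_MAX_STR_LEN];
 *
 *	rte_eth_link_get_nowait(port_id, &link);
 *	rte_eth_link_to_str(link_str, sizeof(link_str), &link);
 *	printf("Port %u: %s\n", port_id, link_str);
 */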
2456
2457 int
2458 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
2459 {
2460         struct rte_eth_dev *dev;
2461
2462         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2463
2464         dev = &rte_eth_devices[port_id];
2465         memset(stats, 0, sizeof(*stats));
2466
2467         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
2468         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
2469         return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
2470 }
2471
2472 int
2473 rte_eth_stats_reset(uint16_t port_id)
2474 {
2475         struct rte_eth_dev *dev;
2476         int ret;
2477
2478         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2479         dev = &rte_eth_devices[port_id];
2480
2481         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
2482         ret = (*dev->dev_ops->stats_reset)(dev);
2483         if (ret != 0)
2484                 return eth_err(port_id, ret);
2485
2486         dev->data->rx_mbuf_alloc_failed = 0;
2487
2488         return 0;
2489 }
2490
2491 static inline int
2492 get_xstats_basic_count(struct rte_eth_dev *dev)
2493 {
2494         uint16_t nb_rxqs, nb_txqs;
2495         int count;
2496
2497         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2498         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2499
2500         count = RTE_NB_STATS;
2501         count += nb_rxqs * RTE_NB_RXQ_STATS;
2502         count += nb_txqs * RTE_NB_TXQ_STATS;
2503
2504         return count;
2505 }
2506
2507 static int
2508 get_xstats_count(uint16_t port_id)
2509 {
2510         struct rte_eth_dev *dev;
2511         int count;
2512
2513         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2514         dev = &rte_eth_devices[port_id];
2515         if (dev->dev_ops->xstats_get_names_by_id != NULL) {
2516                 count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
2517                                 NULL, 0);
2518                 if (count < 0)
2519                         return eth_err(port_id, count);
2520         }
2521         if (dev->dev_ops->xstats_get_names != NULL) {
2522                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
2523                 if (count < 0)
2524                         return eth_err(port_id, count);
2525         } else
2526                 count = 0;
2527
2529         count += get_xstats_basic_count(dev);
2530
2531         return count;
2532 }
2533
2534 int
2535 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
2536                 uint64_t *id)
2537 {
2538         int cnt_xstats, idx_xstat;
2539
2540         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2541
2542         if (!id) {
2543                 RTE_ETHDEV_LOG(ERR, "Id pointer is NULL\n");
2544                 return -EINVAL;
2545         }
2546
2547         if (!xstat_name) {
2548                 RTE_ETHDEV_LOG(ERR, "xstat_name pointer is NULL\n");
2549                 return -EINVAL;
2550         }
2551
2552         /* Get count */
2553         cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
2554         if (cnt_xstats < 0) {
2555                 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n");
2556                 return -ENODEV;
2557         }
2558
2559         /* Get id-name lookup table */
2560         struct rte_eth_xstat_name xstats_names[cnt_xstats];
2561
2562         if (cnt_xstats != rte_eth_xstats_get_names_by_id(
2563                         port_id, xstats_names, cnt_xstats, NULL)) {
2564                 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n");
2565                 return -1;
2566         }
2567
2568         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
2569                 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
2570                         *id = idx_xstat;
2571                         return 0;
2572                 }
2573         }
2574
2575         return -EINVAL;
2576 }
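
/*
 * Illustrative sketch (not part of this file): reading one counter by
 * name through the by-id API.
 *
 *	uint64_t id, value;
 *
 *	if (rte_eth_xstats_get_id_by_name(port_id, "rx_good_packets",
 *					  &id) == 0)
 *		rte_eth_xstats_get_by_id(port_id, &id, &value, 1);
 */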
2577
2578 /* retrieve basic stats names */
2579 static int
2580 rte_eth_basic_stats_get_names(struct rte_eth_dev *dev,
2581         struct rte_eth_xstat_name *xstats_names)
2582 {
2583         int cnt_used_entries = 0;
2584         uint32_t idx, id_queue;
2585         uint16_t num_q;
2586
2587         for (idx = 0; idx < RTE_NB_STATS; idx++) {
2588                 strlcpy(xstats_names[cnt_used_entries].name,
2589                         rte_stats_strings[idx].name,
2590                         sizeof(xstats_names[0].name));
2591                 cnt_used_entries++;
2592         }
2593         num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2594         for (id_queue = 0; id_queue < num_q; id_queue++) {
2595                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
2596                         snprintf(xstats_names[cnt_used_entries].name,
2597                                 sizeof(xstats_names[0].name),
2598                                 "rx_q%u_%s",
2599                                 id_queue, rte_rxq_stats_strings[idx].name);
2600                         cnt_used_entries++;
2601                 }
2602
2603         }
2604         num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2605         for (id_queue = 0; id_queue < num_q; id_queue++) {
2606                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
2607                         snprintf(xstats_names[cnt_used_entries].name,
2608                                 sizeof(xstats_names[0].name),
2609                                 "tx_q%u_%s",
2610                                 id_queue, rte_txq_stats_strings[idx].name);
2611                         cnt_used_entries++;
2612                 }
2613         }
2614         return cnt_used_entries;
2615 }
2616
2617 /* retrieve ethdev extended statistics names */
2618 int
2619 rte_eth_xstats_get_names_by_id(uint16_t port_id,
2620         struct rte_eth_xstat_name *xstats_names, unsigned int size,
2621         uint64_t *ids)
2622 {
2623         struct rte_eth_xstat_name *xstats_names_copy;
2624         unsigned int no_basic_stat_requested = 1;
2625         unsigned int no_ext_stat_requested = 1;
2626         unsigned int expected_entries;
2627         unsigned int basic_count;
2628         struct rte_eth_dev *dev;
2629         unsigned int i;
2630         int ret;
2631
2632         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2633         dev = &rte_eth_devices[port_id];
2634
2635         basic_count = get_xstats_basic_count(dev);
2636         ret = get_xstats_count(port_id);
2637         if (ret < 0)
2638                 return ret;
2639         expected_entries = (unsigned int)ret;
2640
2641         /* Return max number of stats if no ids given */
2642         if (ids == NULL) {
2643                 if (xstats_names == NULL || size < expected_entries)
2644                         return expected_entries;
2645         }
2648
2649         if (ids && !xstats_names)
2650                 return -EINVAL;
2651
2652         if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
2653                 uint64_t ids_copy[size];
2654
2655                 for (i = 0; i < size; i++) {
2656                         if (ids[i] < basic_count) {
2657                                 no_basic_stat_requested = 0;
2658                                 break;
2659                         }
2660
2661                         /*
2662                          * Convert ids to xstats ids that PMD knows.
2663                          * ids known by user are basic + extended stats.
2664                          */
2665                         ids_copy[i] = ids[i] - basic_count;
2666                 }
2667
2668                 if (no_basic_stat_requested)
2669                         return (*dev->dev_ops->xstats_get_names_by_id)(dev,
2670                                         xstats_names, ids_copy, size);
2671         }
2672
2673         /* Retrieve all stats */
2674         if (!ids) {
2675                 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
2676                                 expected_entries);
2677                 if (num_stats < 0 || num_stats > (int)expected_entries)
2678                         return num_stats;
2679                 else
2680                         return expected_entries;
2681         }
2682
2683         xstats_names_copy = calloc(expected_entries,
2684                 sizeof(struct rte_eth_xstat_name));
2685
2686         if (!xstats_names_copy) {
2687                 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n");
2688                 return -ENOMEM;
2689         }
2690
2691         if (ids) {
2692                 for (i = 0; i < size; i++) {
2693                         if (ids[i] >= basic_count) {
2694                                 no_ext_stat_requested = 0;
2695                                 break;
2696                         }
2697                 }
2698         }
2699
2700         /* Fill xstats_names_copy structure */
2701         if (ids && no_ext_stat_requested) {
2702                 rte_eth_basic_stats_get_names(dev, xstats_names_copy);
2703         } else {
2704                 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
2705                         expected_entries);
2706                 if (ret < 0) {
2707                         free(xstats_names_copy);
2708                         return ret;
2709                 }
2710         }
2711
2712         /* Filter stats */
2713         for (i = 0; i < size; i++) {
2714                 if (ids[i] >= expected_entries) {
2715                         RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
2716                         free(xstats_names_copy);
2717                         return -1;
2718                 }
2719                 xstats_names[i] = xstats_names_copy[ids[i]];
2720         }
2721
2722         free(xstats_names_copy);
2723         return size;
2724 }
2725
2726 int
2727 rte_eth_xstats_get_names(uint16_t port_id,
2728         struct rte_eth_xstat_name *xstats_names,
2729         unsigned int size)
2730 {
2731         struct rte_eth_dev *dev;
2732         int cnt_used_entries;
2733         int cnt_expected_entries;
2734         int cnt_driver_entries;
2735
2736         cnt_expected_entries = get_xstats_count(port_id);
2737         if (xstats_names == NULL || cnt_expected_entries < 0 ||
2738                         (int)size < cnt_expected_entries)
2739                 return cnt_expected_entries;
2740
2741         /* port_id checked in get_xstats_count() */
2742         dev = &rte_eth_devices[port_id];
2743
2744         cnt_used_entries = rte_eth_basic_stats_get_names(
2745                 dev, xstats_names);
2746
2747         if (dev->dev_ops->xstats_get_names != NULL) {
2748                 /* If there are any driver-specific xstats, append them
2749                  * to end of list.
2750                  */
2751                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
2752                         dev,
2753                         xstats_names + cnt_used_entries,
2754                         size - cnt_used_entries);
2755                 if (cnt_driver_entries < 0)
2756                         return eth_err(port_id, cnt_driver_entries);
2757                 cnt_used_entries += cnt_driver_entries;
2758         }
2759
2760         return cnt_used_entries;
2761 }
2762
2763
2764 static int
2765 rte_eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
2766 {
2767         struct rte_eth_dev *dev;
2768         struct rte_eth_stats eth_stats;
2769         unsigned int count = 0, i, q;
2770         uint64_t val, *stats_ptr;
2771         uint16_t nb_rxqs, nb_txqs;
2772         int ret;
2773
2774         ret = rte_eth_stats_get(port_id, &eth_stats);
2775         if (ret < 0)
2776                 return ret;
2777
2778         dev = &rte_eth_devices[port_id];
2779
2780         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2781         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2782
2783         /* global stats */
2784         for (i = 0; i < RTE_NB_STATS; i++) {
2785                 stats_ptr = RTE_PTR_ADD(&eth_stats,
2786                                         rte_stats_strings[i].offset);
2787                 val = *stats_ptr;
2788                 xstats[count++].value = val;
2789         }
2790
2791         /* per-rxq stats */
2792         for (q = 0; q < nb_rxqs; q++) {
2793                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
2794                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2795                                         rte_rxq_stats_strings[i].offset +
2796                                         q * sizeof(uint64_t));
2797                         val = *stats_ptr;
2798                         xstats[count++].value = val;
2799                 }
2800         }
2801
2802         /* per-txq stats */
2803         for (q = 0; q < nb_txqs; q++) {
2804                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
2805                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2806                                         rte_txq_stats_strings[i].offset +
2807                                         q * sizeof(uint64_t));
2808                         val = *stats_ptr;
2809                         xstats[count++].value = val;
2810                 }
2811         }
2812         return count;
2813 }
2814
2815 /* retrieve ethdev extended statistics */
2816 int
2817 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
2818                          uint64_t *values, unsigned int size)
2819 {
2820         unsigned int no_basic_stat_requested = 1;
2821         unsigned int no_ext_stat_requested = 1;
2822         unsigned int num_xstats_filled;
2823         unsigned int basic_count;
2824         uint16_t expected_entries;
2825         struct rte_eth_dev *dev;
2826         unsigned int i;
2827         int ret;
2828
2829         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2830         ret = get_xstats_count(port_id);
2831         if (ret < 0)
2832                 return ret;
2833         expected_entries = (uint16_t)ret;
2834         struct rte_eth_xstat xstats[expected_entries];
2835         dev = &rte_eth_devices[port_id];
2836         basic_count = get_xstats_basic_count(dev);
2837
2838         /* Return max number of stats if no ids given */
2839         if (ids == NULL) {
2840                 if (values == NULL || size < expected_entries)
2841                         return expected_entries;
2842         }
2845
2846         if (ids && !values)
2847                 return -EINVAL;
2848
2849         if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
2851                 uint64_t ids_copy[size];
2852
2853                 for (i = 0; i < size; i++) {
2854                         if (ids[i] < basic_count) {
2855                                 no_basic_stat_requested = 0;
2856                                 break;
2857                         }
2858
2859                         /*
2860                          * Convert ids to xstats ids that PMD knows.
2861                          * ids known by user are basic + extended stats.
2862                          */
2863                         ids_copy[i] = ids[i] - basic_count;
2864                 }
2865
2866                 if (no_basic_stat_requested)
2867                         return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
2868                                         values, size);
2869         }
2870
2871         if (ids) {
2872                 for (i = 0; i < size; i++) {
2873                         if (ids[i] >= basic_count) {
2874                                 no_ext_stat_requested = 0;
2875                                 break;
2876                         }
2877                 }
2878         }
2879
2880         /* Fill the xstats structure */
2881         if (ids && no_ext_stat_requested)
2882                 ret = rte_eth_basic_stats_get(port_id, xstats);
2883         else
2884                 ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
2885
2886         if (ret < 0)
2887                 return ret;
2888         num_xstats_filled = (unsigned int)ret;
2889
2890         /* Return all stats */
2891         if (!ids) {
2892                 for (i = 0; i < num_xstats_filled; i++)
2893                         values[i] = xstats[i].value;
2894                 return expected_entries;
2895         }
2896
2897         /* Filter stats */
2898         for (i = 0; i < size; i++) {
2899                 if (ids[i] >= expected_entries) {
2900                         RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
2901                         return -1;
2902                 }
2903                 values[i] = xstats[ids[i]].value;
2904         }
2905         return size;
2906 }
2907
2908 int
2909 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
2910         unsigned int n)
2911 {
2912         struct rte_eth_dev *dev;
2913         unsigned int count = 0, i;
2914         signed int xcount = 0;
2915         uint16_t nb_rxqs, nb_txqs;
2916         int ret;
2917
2918         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2919
2920         dev = &rte_eth_devices[port_id];
2921
2922         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2923         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2924
2925         /* Return generic statistics */
2926         count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
2927                 (nb_txqs * RTE_NB_TXQ_STATS);
2928
2929         /* implemented by the driver */
2930         if (dev->dev_ops->xstats_get != NULL) {
2931                 /* Retrieve the driver-specific xstats, appended after
2932                  * the generic stats at the end of the xstats array.
2933                  */
2934                 xcount = (*dev->dev_ops->xstats_get)(dev,
2935                                      xstats ? xstats + count : NULL,
2936                                      (n > count) ? n - count : 0);
2937
2938                 if (xcount < 0)
2939                         return eth_err(port_id, xcount);
2940         }
2941
2942         if (n < count + xcount || xstats == NULL)
2943                 return count + xcount;
2944
2945         /* now fill the xstats structure */
2946         ret = rte_eth_basic_stats_get(port_id, xstats);
2947         if (ret < 0)
2948                 return ret;
2949         count = ret;
2950
2951         for (i = 0; i < count; i++)
2952                 xstats[i].id = i;
2953         /* add an offset to driver-specific stats */
2954         for ( ; i < count + xcount; i++)
2955                 xstats[i].id += count;
2956
2957         return count + xcount;
2958 }
2959
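/*
 * Example (illustrative only, not part of the library): the usual
 * two-call pattern for reading every extended statistic. A first call
 * with a NULL array returns the required count. Error handling and
 * cleanup are abbreviated, and "port_id" is assumed to be a valid,
 * configured port.
 *
 *	int nb = rte_eth_xstats_get(port_id, NULL, 0);
 *	if (nb <= 0)
 *		return;
 *	struct rte_eth_xstat *stats = malloc(nb * sizeof(*stats));
 *	struct rte_eth_xstat_name *names = malloc(nb * sizeof(*names));
 *	if (stats == NULL || names == NULL)
 *		return;
 *	rte_eth_xstats_get_names(port_id, names, nb);
 *	nb = rte_eth_xstats_get(port_id, stats, nb);
 *	for (int i = 0; i < nb; i++)
 *		printf("%s: %" PRIu64 "\n",
 *			names[stats[i].id].name, stats[i].value);
 */
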
2960 /* reset ethdev extended statistics */
2961 int
2962 rte_eth_xstats_reset(uint16_t port_id)
2963 {
2964         struct rte_eth_dev *dev;
2965
2966         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2967         dev = &rte_eth_devices[port_id];
2968
2969         /* implemented by the driver */
2970         if (dev->dev_ops->xstats_reset != NULL)
2971                 return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev));
2972
2973         /* fallback to default */
2974         return rte_eth_stats_reset(port_id);
2975 }
2976
2977 static int
2978 set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx,
2979                 uint8_t is_rx)
2980 {
2981         struct rte_eth_dev *dev;
2982
2983         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2984
2985         dev = &rte_eth_devices[port_id];
2986
2987         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
2988
2989         if (is_rx && (queue_id >= dev->data->nb_rx_queues))
2990                 return -EINVAL;
2991
2992         if (!is_rx && (queue_id >= dev->data->nb_tx_queues))
2993                 return -EINVAL;
2994
2995         if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
2996                 return -EINVAL;
2997
2998         return (*dev->dev_ops->queue_stats_mapping_set)
2999                         (dev, queue_id, stat_idx, is_rx);
3000 }
3001
3002
3003 int
3004 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
3005                 uint8_t stat_idx)
3006 {
3007         return eth_err(port_id, set_queue_stats_mapping(port_id, tx_queue_id,
3008                                                 stat_idx, STAT_QMAP_TX));
3009 }
3010
3011
3012 int
3013 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
3014                 uint8_t stat_idx)
3015 {
3016         return eth_err(port_id, set_queue_stats_mapping(port_id, rx_queue_id,
3017                                                 stat_idx, STAT_QMAP_RX));
3018 }
3019
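/*
 * Example (illustrative): mapping the first four Rx queues to per-queue
 * stat counters 0-3. Only meaningful on drivers that implement
 * queue_stats_mapping_set; others return -ENOTSUP.
 *
 *	for (uint8_t q = 0; q < 4; q++)
 *		if (rte_eth_dev_set_rx_queue_stats_mapping(port_id,
 *							    q, q) != 0)
 *			break;
 */
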
3020 int
3021 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
3022 {
3023         struct rte_eth_dev *dev;
3024
3025         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3026         dev = &rte_eth_devices[port_id];
3027
3028         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
3029         return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
3030                                                         fw_version, fw_size));
3031 }
3032
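/*
 * Example (illustrative): querying the firmware version string. Per
 * the API contract, a positive return value is assumed here to mean
 * the buffer was too small and to report the size that was needed.
 *
 *	char fw[64];
 *	int ret = rte_eth_dev_fw_version_get(port_id, fw, sizeof(fw));
 *
 *	if (ret == 0)
 *		printf("firmware: %s\n", fw);
 *	else if (ret > 0)
 *		printf("firmware string truncated, need %d bytes\n", ret);
 */
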
3033 int
3034 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
3035 {
3036         struct rte_eth_dev *dev;
3037         const struct rte_eth_desc_lim lim = {
3038                 .nb_max = UINT16_MAX,
3039                 .nb_min = 0,
3040                 .nb_align = 1,
3041                 .nb_seg_max = UINT16_MAX,
3042                 .nb_mtu_seg_max = UINT16_MAX,
3043         };
3044         int diag;
3045
3046         /*
3047          * Init dev_info before the port_id check so that a caller which
3048          * ignores the return status never reads uninitialized data.
3049          */
3050         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3051         dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
3052
3053         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3054         dev = &rte_eth_devices[port_id];
3055
3056         dev_info->rx_desc_lim = lim;
3057         dev_info->tx_desc_lim = lim;
3058         dev_info->device = dev->device;
3059         dev_info->min_mtu = RTE_ETHER_MIN_MTU;
3060         dev_info->max_mtu = UINT16_MAX;
3061
3062         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
3063         diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info);
3064         if (diag != 0) {
3065                 /* Cleanup already filled in device information */
3066                 memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3067                 return eth_err(port_id, diag);
3068         }
3069
3070         /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */
3071         dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues,
3072                         RTE_MAX_QUEUES_PER_PORT);
3073         dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues,
3074                         RTE_MAX_QUEUES_PER_PORT);
3075
3076         dev_info->driver_name = dev->device->driver->name;
3077         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
3078         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
3079
3080         dev_info->dev_flags = &dev->data->dev_flags;
3081
3082         return 0;
3083 }
3084
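/*
 * Example (illustrative): sizing the queue configuration from the
 * reported limits. "port_conf" stands in for an application-defined
 * struct rte_eth_conf, not something provided here.
 *
 *	struct rte_eth_dev_info info;
 *
 *	if (rte_eth_dev_info_get(port_id, &info) != 0)
 *		return;
 *	uint16_t nb_rx = RTE_MIN((uint16_t)4, info.max_rx_queues);
 *	uint16_t nb_tx = RTE_MIN((uint16_t)4, info.max_tx_queues);
 *	rte_eth_dev_configure(port_id, nb_rx, nb_tx, &port_conf);
 */
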
3085 int
3086 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3087                                  uint32_t *ptypes, int num)
3088 {
3089         int i, j;
3090         struct rte_eth_dev *dev;
3091         const uint32_t *all_ptypes;
3092
3093         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3094         dev = &rte_eth_devices[port_id];
3095         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
3096         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3097
3098         if (!all_ptypes)
3099                 return 0;
3100
3101         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
3102                 if (all_ptypes[i] & ptype_mask) {
3103                         if (j < num)
3104                                 ptypes[j] = all_ptypes[i];
3105                         j++;
3106                 }
3107
3108         return j;
3109 }
3110
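/*
 * Example (illustrative): the count-then-fill pattern for listing the
 * L4 packet types a driver can recognize.
 *
 *	int num = rte_eth_dev_get_supported_ptypes(port_id,
 *			RTE_PTYPE_L4_MASK, NULL, 0);
 *	if (num <= 0)
 *		return;
 *	uint32_t *ptypes = malloc(num * sizeof(*ptypes));
 *	if (ptypes == NULL)
 *		return;
 *	num = rte_eth_dev_get_supported_ptypes(port_id,
 *			RTE_PTYPE_L4_MASK, ptypes, num);
 *	for (int i = 0; i < num; i++)
 *		printf("ptype 0x%08x\n", ptypes[i]);
 *	free(ptypes);
 */
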
3111 int
3112 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3113                                  uint32_t *set_ptypes, unsigned int num)
3114 {
3115         const uint32_t valid_ptype_masks[] = {
3116                 RTE_PTYPE_L2_MASK,
3117                 RTE_PTYPE_L3_MASK,
3118                 RTE_PTYPE_L4_MASK,
3119                 RTE_PTYPE_TUNNEL_MASK,
3120                 RTE_PTYPE_INNER_L2_MASK,
3121                 RTE_PTYPE_INNER_L3_MASK,
3122                 RTE_PTYPE_INNER_L4_MASK,
3123         };
3124         const uint32_t *all_ptypes;
3125         struct rte_eth_dev *dev;
3126         uint32_t unused_mask;
3127         unsigned int i, j;
3128         int ret;
3129
3130         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3131         dev = &rte_eth_devices[port_id];
3132
3133         if (num > 0 && set_ptypes == NULL)
3134                 return -EINVAL;
3135
3136         if (*dev->dev_ops->dev_supported_ptypes_get == NULL ||
3137                         *dev->dev_ops->dev_ptypes_set == NULL) {
3138                 ret = 0;
3139                 goto ptype_unknown;
3140         }
3141
3142         if (ptype_mask == 0) {
3143                 ret = (*dev->dev_ops->dev_ptypes_set)(dev,
3144                                 ptype_mask);
3145                 goto ptype_unknown;
3146         }
3147
3148         unused_mask = ptype_mask;
3149         for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) {
3150                 uint32_t mask = ptype_mask & valid_ptype_masks[i];
3151                 if (mask && mask != valid_ptype_masks[i]) {
3152                         ret = -EINVAL;
3153                         goto ptype_unknown;
3154                 }
3155                 unused_mask &= ~valid_ptype_masks[i];
3156         }
3157
3158         if (unused_mask) {
3159                 ret = -EINVAL;
3160                 goto ptype_unknown;
3161         }
3162
3163         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3164         if (all_ptypes == NULL) {
3165                 ret = 0;
3166                 goto ptype_unknown;
3167         }
3168
3169         /*
3170          * Accommodate as many set_ptypes as possible. If the supplied
3171          * set_ptypes array is too small, fill it partially.
3172          */
3173         for (i = 0, j = 0; set_ptypes != NULL &&
3174                                 (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) {
3175                 if (ptype_mask & all_ptypes[i]) {
3176                         if (j + 1 < num) { /* j < num - 1 wraps when num == 0 */
3177                                 set_ptypes[j] = all_ptypes[i];
3178                                 j++;
3179                                 continue;
3180                         }
3181                         break;
3182                 }
3183         }
3184
3185         if (set_ptypes != NULL && j < num)
3186                 set_ptypes[j] = RTE_PTYPE_UNKNOWN;
3187
3188         return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask);
3189
3190 ptype_unknown:
3191         if (num > 0)
3192                 set_ptypes[0] = RTE_PTYPE_UNKNOWN;
3193
3194         return ret;
3195 }
3196
3197 int
3198 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
3199 {
3200         struct rte_eth_dev *dev;
3201
3202         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3203         dev = &rte_eth_devices[port_id];
3204         rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
3205
3206         return 0;
3207 }
3208
3209 int
3210 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
3211 {
3212         struct rte_eth_dev *dev;
3213
3214         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3215
3216         dev = &rte_eth_devices[port_id];
3217         *mtu = dev->data->mtu;
3218         return 0;
3219 }
3220
3221 int
3222 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
3223 {
3224         int ret;
3225         struct rte_eth_dev_info dev_info;
3226         struct rte_eth_dev *dev;
3227
3228         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3229         dev = &rte_eth_devices[port_id];
3230         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
3231
3232         /*
3233          * Check if the device supports dev_infos_get, if it does not
3234          * skip min_mtu/max_mtu validation here as this requires values
3235          * that are populated within the call to rte_eth_dev_info_get()
3236          * which relies on dev->dev_ops->dev_infos_get.
3237          */
3238         if (*dev->dev_ops->dev_infos_get != NULL) {
3239                 ret = rte_eth_dev_info_get(port_id, &dev_info);
3240                 if (ret != 0)
3241                         return ret;
3242
3243                 if (mtu < dev_info.min_mtu || mtu > dev_info.max_mtu)
3244                         return -EINVAL;
3245         }
3246
3247         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
3248         if (!ret)
3249                 dev->data->mtu = mtu;
3250
3251         return eth_err(port_id, ret);
3252 }
3253
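/*
 * Example (illustrative): enabling jumbo frames by raising the MTU,
 * clamped to the limit the driver reports.
 *
 *	struct rte_eth_dev_info info;
 *	uint16_t mtu = 9000;
 *
 *	if (rte_eth_dev_info_get(port_id, &info) != 0)
 *		return;
 *	if (mtu > info.max_mtu)
 *		mtu = info.max_mtu;
 *	if (rte_eth_dev_set_mtu(port_id, mtu) != 0)
 *		printf("MTU %u rejected by port %u\n", mtu, port_id);
 */
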
3254 int
3255 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
3256 {
3257         struct rte_eth_dev *dev;
3258         int ret;
3259
3260         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3261         dev = &rte_eth_devices[port_id];
3262         if (!(dev->data->dev_conf.rxmode.offloads &
3263               DEV_RX_OFFLOAD_VLAN_FILTER)) {
3264                 RTE_ETHDEV_LOG(ERR, "Port %u: vlan-filtering disabled\n",
3265                         port_id);
3266                 return -ENOSYS;
3267         }
3268
3269         if (vlan_id > 4095) {
3270                 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n",
3271                         port_id, vlan_id);
3272                 return -EINVAL;
3273         }
3274         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
3275
3276         ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
3277         if (ret == 0) {
3278                 struct rte_vlan_filter_conf *vfc;
3279                 int vidx;
3280                 int vbit;
3281
3282                 vfc = &dev->data->vlan_filter_conf;
3283                 vidx = vlan_id / 64;
3284                 vbit = vlan_id % 64;
3285
3286                 if (on)
3287                         vfc->ids[vidx] |= UINT64_C(1) << vbit;
3288                 else
3289                         vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
3290         }
3291
3292         return eth_err(port_id, ret);
3293 }
3294
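/*
 * Example (illustrative): admitting only VLAN 100 on a port that was
 * configured with DEV_RX_OFFLOAD_VLAN_FILTER in rxmode.offloads;
 * without that offload the call above fails with -ENOSYS.
 *
 *	if (rte_eth_dev_vlan_filter(port_id, 100, 1) != 0)
 *		printf("cannot enable VLAN 100 on port %u\n", port_id);
 */
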
3295 int
3296 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3297                                     int on)
3298 {
3299         struct rte_eth_dev *dev;
3300
3301         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3302         dev = &rte_eth_devices[port_id];
3303         if (rx_queue_id >= dev->data->nb_rx_queues) {
3304                 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id);
3305                 return -EINVAL;
3306         }
3307
3308         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
3309         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
3310
3311         return 0;
3312 }
3313
3314 int
3315 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
3316                                 enum rte_vlan_type vlan_type,
3317                                 uint16_t tpid)
3318 {
3319         struct rte_eth_dev *dev;
3320
3321         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3322         dev = &rte_eth_devices[port_id];
3323         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
3324
3325         return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
3326                                                                tpid));
3327 }
3328
3329 int
3330 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
3331 {
3332         struct rte_eth_dev_info dev_info;
3333         struct rte_eth_dev *dev;
3334         int ret = 0;
3335         int mask = 0;
3336         int cur, org = 0;
3337         uint64_t orig_offloads;
3338         uint64_t dev_offloads;
3339         uint64_t new_offloads;
3340
3341         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3342         dev = &rte_eth_devices[port_id];
3343
3344         /* save original values in case of failure */
3345         orig_offloads = dev->data->dev_conf.rxmode.offloads;
3346         dev_offloads = orig_offloads;
3347
3348         /* check which option changed by application */
3349         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
3350         org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
3351         if (cur != org) {
3352                 if (cur)
3353                         dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
3354                 else
3355                         dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
3356                 mask |= ETH_VLAN_STRIP_MASK;
3357         }
3358
3359         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
3360         org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
3361         if (cur != org) {
3362                 if (cur)
3363                         dev_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3364                 else
3365                         dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
3366                 mask |= ETH_VLAN_FILTER_MASK;
3367         }
3368
3369         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
3370         org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND);
3371         if (cur != org) {
3372                 if (cur)
3373                         dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
3374                 else
3375                         dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
3376                 mask |= ETH_VLAN_EXTEND_MASK;
3377         }
3378
3379         cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD);
3380         org = !!(dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP);
3381         if (cur != org) {
3382                 if (cur)
3383                         dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
3384                 else
3385                         dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
3386                 mask |= ETH_QINQ_STRIP_MASK;
3387         }
3388
3389         /* no change */
3390         if (mask == 0)
3391                 return ret;
3392
3393         ret = rte_eth_dev_info_get(port_id, &dev_info);
3394         if (ret != 0)
3395                 return ret;
3396
3397         /* Rx VLAN offloading must be within its device capabilities */
3398         if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) {
3399                 new_offloads = dev_offloads & ~orig_offloads;
3400                 RTE_ETHDEV_LOG(ERR,
3401                         "Ethdev port_id=%u newly requested VLAN offloads "
3402                         "0x%" PRIx64 " are not within Rx offload capabilities "
3403                         "0x%" PRIx64 " in %s()\n",
3404                         port_id, new_offloads, dev_info.rx_offload_capa,
3405                         __func__);
3406                 return -EINVAL;
3407         }
3408
3409         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
3410         dev->data->dev_conf.rxmode.offloads = dev_offloads;
3411         ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
3412         if (ret) {
3413                 /* hit an error, restore the original values */
3414                 dev->data->dev_conf.rxmode.offloads = orig_offloads;
3415         }
3416
3417         return eth_err(port_id, ret);
3418 }
3419
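/*
 * Example (illustrative): enabling VLAN stripping at runtime with the
 * read-modify-write pattern this API expects, so the other VLAN
 * offload flags are preserved.
 *
 *	int mask = rte_eth_dev_get_vlan_offload(port_id);
 *
 *	if (mask < 0)
 *		return;
 *	mask |= ETH_VLAN_STRIP_OFFLOAD;
 *	if (rte_eth_dev_set_vlan_offload(port_id, mask) != 0)
 *		printf("VLAN strip rejected on port %u\n", port_id);
 */
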
3420 int
3421 rte_eth_dev_get_vlan_offload(uint16_t port_id)
3422 {
3423         struct rte_eth_dev *dev;
3424         uint64_t *dev_offloads;
3425         int ret = 0;
3426
3427         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3428         dev = &rte_eth_devices[port_id];
3429         dev_offloads = &dev->data->dev_conf.rxmode.offloads;
3430
3431         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
3432                 ret |= ETH_VLAN_STRIP_OFFLOAD;
3433
3434         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
3435                 ret |= ETH_VLAN_FILTER_OFFLOAD;
3436
3437         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
3438                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
3439
3440         if (*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
3441                 ret |= ETH_QINQ_STRIP_OFFLOAD;
3442
3443         return ret;
3444 }
3445
3446 int
3447 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
3448 {
3449         struct rte_eth_dev *dev;
3450
3451         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3452         dev = &rte_eth_devices[port_id];
3453         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
3454
3455         return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
3456 }
3457
3458 int
3459 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3460 {
3461         struct rte_eth_dev *dev;
3462
3463         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3464         dev = &rte_eth_devices[port_id];
3465         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
3466         memset(fc_conf, 0, sizeof(*fc_conf));
3467         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
3468 }
3469
3470 int
3471 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3472 {
3473         struct rte_eth_dev *dev;
3474
3475         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3476         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
3477                 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n");
3478                 return -EINVAL;
3479         }
3480
3481         dev = &rte_eth_devices[port_id];
3482         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
3483         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
3484 }
3485
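/*
 * Example (illustrative): requesting full (Rx + Tx) link-level flow
 * control. The pause-time value is an arbitrary placeholder; real
 * watermark and timing values are NIC-specific and validated by the
 * driver.
 *
 *	struct rte_eth_fc_conf fc = {
 *		.mode = RTE_FC_FULL,
 *		.pause_time = 0x680,
 *		.send_xon = 1,
 *		.autoneg = 1,
 *	};
 *
 *	if (rte_eth_dev_flow_ctrl_set(port_id, &fc) != 0)
 *		printf("flow control not applied on port %u\n", port_id);
 */
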
3486 int
3487 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
3488                                    struct rte_eth_pfc_conf *pfc_conf)
3489 {
3490         struct rte_eth_dev *dev;
3491
3492         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3493         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
3494                 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
3495                 return -EINVAL;
3496         }
3497
3498         dev = &rte_eth_devices[port_id];
3499         /* High water / low water validation is device-specific */
3500         if (*dev->dev_ops->priority_flow_ctrl_set)
3501                 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
3502                                         (dev, pfc_conf));
3503         return -ENOTSUP;
3504 }
3505
3506 static int
3507 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
3508                         uint16_t reta_size)
3509 {
3510         uint16_t i, num;
3511
3512         if (!reta_conf)
3513                 return -EINVAL;
3514
3515         num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
3516         for (i = 0; i < num; i++) {
3517                 if (reta_conf[i].mask)
3518                         return 0;
3519         }
3520
3521         return -EINVAL;
3522 }
3523
3524 static int
3525 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
3526                          uint16_t reta_size,
3527                          uint16_t max_rxq)
3528 {
3529         uint16_t i, idx, shift;
3530
3531         if (!reta_conf)
3532                 return -EINVAL;
3533
3534         if (max_rxq == 0) {
3535                 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n");
3536                 return -EINVAL;
3537         }
3538
3539         for (i = 0; i < reta_size; i++) {
3540                 idx = i / RTE_RETA_GROUP_SIZE;
3541                 shift = i % RTE_RETA_GROUP_SIZE;
3542                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
3543                         (reta_conf[idx].reta[shift] >= max_rxq)) {
3544                         RTE_ETHDEV_LOG(ERR,
3545                                 "reta_conf[%u]->reta[%u]: %u exceeds the number of Rx queues: %u\n",
3546                                 idx, shift,
3547                                 reta_conf[idx].reta[shift], max_rxq);
3548                         return -EINVAL;
3549                 }
3550         }
3551
3552         return 0;
3553 }
3554
3555 int
3556 rte_eth_dev_rss_reta_update(uint16_t port_id,
3557                             struct rte_eth_rss_reta_entry64 *reta_conf,
3558                             uint16_t reta_size)
3559 {
3560         struct rte_eth_dev *dev;
3561         int ret;
3562
3563         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3564         /* Check mask bits */
3565         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
3566         if (ret < 0)
3567                 return ret;
3568
3569         dev = &rte_eth_devices[port_id];
3570
3571         /* Check entry value */
3572         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
3573                                 dev->data->nb_rx_queues);
3574         if (ret < 0)
3575                 return ret;
3576
3577         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
3578         return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
3579                                                              reta_size));
3580 }
3581
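/*
 * Example (illustrative): spreading the whole redirection table over
 * the configured Rx queues in round-robin order. Assumes reta_size is
 * at most ETH_RSS_RETA_SIZE_512 and nb_rx_queues is non-zero; each
 * rte_eth_rss_reta_entry64 covers RTE_RETA_GROUP_SIZE entries selected
 * by its mask.
 *
 *	struct rte_eth_dev_info info;
 *	struct rte_eth_rss_reta_entry64 reta[ETH_RSS_RETA_SIZE_512 /
 *					     RTE_RETA_GROUP_SIZE] = {0};
 *
 *	if (rte_eth_dev_info_get(port_id, &info) != 0)
 *		return;
 *	for (uint16_t i = 0; i < info.reta_size; i++) {
 *		uint16_t idx = i / RTE_RETA_GROUP_SIZE;
 *		uint16_t shift = i % RTE_RETA_GROUP_SIZE;
 *
 *		reta[idx].mask |= UINT64_C(1) << shift;
 *		reta[idx].reta[shift] = i % info.nb_rx_queues;
 *	}
 *	rte_eth_dev_rss_reta_update(port_id, reta, info.reta_size);
 */
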
3582 int
3583 rte_eth_dev_rss_reta_query(uint16_t port_id,
3584                            struct rte_eth_rss_reta_entry64 *reta_conf,
3585                            uint16_t reta_size)
3586 {
3587         struct rte_eth_dev *dev;
3588         int ret;
3589
3590         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3591
3592         /* Check mask bits */
3593         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
3594         if (ret < 0)
3595                 return ret;
3596
3597         dev = &rte_eth_devices[port_id];
3598         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
3599         return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
3600                                                             reta_size));
3601 }
3602
3603 int
3604 rte_eth_dev_rss_hash_update(uint16_t port_id,
3605                             struct rte_eth_rss_conf *rss_conf)
3606 {
3607         struct rte_eth_dev *dev;
3608         struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
3609         int ret;
3610
3611         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3612
3613         ret = rte_eth_dev_info_get(port_id, &dev_info);
3614         if (ret != 0)
3615                 return ret;
3616
3617         rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf);
3618
3619         dev = &rte_eth_devices[port_id];
3620         if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
3621             dev_info.flow_type_rss_offloads) {
3622                 RTE_ETHDEV_LOG(ERR,
3623                         "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
3624                         port_id, rss_conf->rss_hf,
3625                         dev_info.flow_type_rss_offloads);
3626                 return -EINVAL;
3627         }
3628         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
3629         return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
3630                                                                  rss_conf));
3631 }
3632
3633 int
3634 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
3635                               struct rte_eth_rss_conf *rss_conf)
3636 {
3637         struct rte_eth_dev *dev;
3638
3639         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3640         dev = &rte_eth_devices[port_id];
3641         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
3642         return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
3643                                                                    rss_conf));
3644 }
3645
3646 int
3647 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
3648                                 struct rte_eth_udp_tunnel *udp_tunnel)
3649 {
3650         struct rte_eth_dev *dev;
3651
3652         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3653         if (udp_tunnel == NULL) {
3654                 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
3655                 return -EINVAL;
3656         }
3657
3658         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
3659                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
3660                 return -EINVAL;
3661         }
3662
3663         dev = &rte_eth_devices[port_id];
3664         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
3665         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
3666                                                                 udp_tunnel));
3667 }
3668
3669 int
3670 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
3671                                    struct rte_eth_udp_tunnel *udp_tunnel)
3672 {
3673         struct rte_eth_dev *dev;
3674
3675         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3676         dev = &rte_eth_devices[port_id];
3677
3678         if (udp_tunnel == NULL) {
3679                 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
3680                 return -EINVAL;
3681         }
3682
3683         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
3684                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
3685                 return -EINVAL;
3686         }
3687
3688         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
3689         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
3690                                                                 udp_tunnel));
3691 }
3692
3693 int
3694 rte_eth_led_on(uint16_t port_id)
3695 {
3696         struct rte_eth_dev *dev;
3697
3698         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3699         dev = &rte_eth_devices[port_id];
3700         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
3701         return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
3702 }
3703
3704 int
3705 rte_eth_led_off(uint16_t port_id)
3706 {
3707         struct rte_eth_dev *dev;
3708
3709         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3710         dev = &rte_eth_devices[port_id];
3711         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
3712         return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
3713 }
3714
3715 int
3716 rte_eth_fec_get_capability(uint16_t port_id,
3717                            struct rte_eth_fec_capa *speed_fec_capa,
3718                            unsigned int num)
3719 {
3720         struct rte_eth_dev *dev;
3721         int ret;
3722
3723         if (speed_fec_capa == NULL && num > 0)
3724                 return -EINVAL;
3725
3726         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3727         dev = &rte_eth_devices[port_id];
3728         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get_capability, -ENOTSUP);
3729         ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num);
3730
3731         return ret;
3732 }
3733
3734 int
3735 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa)
3736 {
3737         struct rte_eth_dev *dev;
3738
3739         if (fec_capa == NULL)
3740                 return -EINVAL;
3741
3742         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3743         dev = &rte_eth_devices[port_id];
3744         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get, -ENOTSUP);
3745         return eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa));
3746 }
3747
3748 int
3749 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa)
3750 {
3751         struct rte_eth_dev *dev;
3752
3753         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3754         dev = &rte_eth_devices[port_id];
3755         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_set, -ENOTSUP);
3756         return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa));
3757 }
3758
3759 /*
3760  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
3761  * an empty spot.
3762  */
3763 static int
3764 get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
3765 {
3766         struct rte_eth_dev_info dev_info;
3767         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3768         unsigned i;
3769         int ret;
3770
3771         ret = rte_eth_dev_info_get(port_id, &dev_info);
3772         if (ret != 0)
3773                 return -1;
3774
3775         for (i = 0; i < dev_info.max_mac_addrs; i++)
3776                 if (memcmp(addr, &dev->data->mac_addrs[i],
3777                                 RTE_ETHER_ADDR_LEN) == 0)
3778                         return i;
3779
3780         return -1;
3781 }
3782
3783 static const struct rte_ether_addr null_mac_addr;
3784
3785 int
3786 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
3787                         uint32_t pool)
3788 {
3789         struct rte_eth_dev *dev;
3790         int index;
3791         uint64_t pool_mask;
3792         int ret;
3793
3794         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3795         dev = &rte_eth_devices[port_id];
3796         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
3797
3798         if (rte_is_zero_ether_addr(addr)) {
3799                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
3800                         port_id);
3801                 return -EINVAL;
3802         }
3803         if (pool >= ETH_64_POOLS) {
3804                 RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", ETH_64_POOLS - 1);
3805                 return -EINVAL;
3806         }
3807
3808         index = get_mac_addr_index(port_id, addr);
3809         if (index < 0) {
3810                 index = get_mac_addr_index(port_id, &null_mac_addr);
3811                 if (index < 0) {
3812                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
3813                                 port_id);
3814                         return -ENOSPC;
3815                 }
3816         } else {
3817                 pool_mask = dev->data->mac_pool_sel[index];
3818
3819                 /* If both the MAC address and pool are already set, do nothing */
3820                 if (pool_mask & (1ULL << pool))
3821                         return 0;
3822         }
3823
3824         /* Update NIC */
3825         ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
3826
3827         if (ret == 0) {
3828                 /* Update address in NIC data structure */
3829                 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]);
3830
3831                 /* Update pool bitmap in NIC data structure */
3832                 dev->data->mac_pool_sel[index] |= (1ULL << pool);
3833         }
3834
3835         return eth_err(port_id, ret);
3836 }
3837
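/*
 * Example (illustrative): adding a secondary unicast address to pool 0.
 * The address below is a made-up, locally administered one.
 *
 *	struct rte_ether_addr extra = {
 *		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 *	};
 *
 *	if (rte_eth_dev_mac_addr_add(port_id, &extra, 0) == -ENOSPC)
 *		printf("MAC address table of port %u is full\n", port_id);
 */
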
3838 int
3839 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr)
3840 {
3841         struct rte_eth_dev *dev;
3842         int index;
3843
3844         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3845         dev = &rte_eth_devices[port_id];
3846         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
3847
3848         index = get_mac_addr_index(port_id, addr);
3849         if (index == 0) {
3850                 RTE_ETHDEV_LOG(ERR,
3851                         "Port %u: Cannot remove default MAC address\n",
3852                         port_id);
3853                 return -EADDRINUSE;
3854         } else if (index < 0)
3855                 return 0;  /* Do nothing if address wasn't found */
3856
3857         /* Update NIC */
3858         (*dev->dev_ops->mac_addr_remove)(dev, index);
3859
3860         /* Update address in NIC data structure */
3861         rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
3862
3863         /* reset pool bitmap */
3864         dev->data->mac_pool_sel[index] = 0;
3865
3866         return 0;
3867 }
3868
3869 int
3870 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr)
3871 {
3872         struct rte_eth_dev *dev;
3873         int ret;
3874
3875         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3876
3877         if (!rte_is_valid_assigned_ether_addr(addr))
3878                 return -EINVAL;
3879
3880         dev = &rte_eth_devices[port_id];
3881         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
3882
3883         ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
3884         if (ret < 0)
3885                 return ret;
3886
3887         /* Update default address in NIC data structure */
3888         rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
3889
3890         return 0;
3891 }
3892
3893
3894 /*
3895  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
3896  * an empty spot.
3897  */
3898 static int
3899 get_hash_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
3900 {
3901         struct rte_eth_dev_info dev_info;
3902         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3903         unsigned i;
3904         int ret;
3905
3906         ret = rte_eth_dev_info_get(port_id, &dev_info);
3907         if (ret != 0)
3908                 return -1;
3909
3910         if (!dev->data->hash_mac_addrs)
3911                 return -1;
3912
3913         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
3914                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
3915                         RTE_ETHER_ADDR_LEN) == 0)
3916                         return i;
3917
3918         return -1;
3919 }
3920
3921 int
3922 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
3923                                 uint8_t on)
3924 {
3925         int index;
3926         int ret;
3927         struct rte_eth_dev *dev;
3928
3929         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3930
3931         dev = &rte_eth_devices[port_id];
3932         if (rte_is_zero_ether_addr(addr)) {
3933                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
3934                         port_id);
3935                 return -EINVAL;
3936         }
3937
3938         index = get_hash_mac_addr_index(port_id, addr);
3939         /* Check if it's already there, and do nothing */
3940         if ((index >= 0) && on)
3941                 return 0;
3942
3943         if (index < 0) {
3944                 if (!on) {
3945                         RTE_ETHDEV_LOG(ERR,
3946                                 "Port %u: the MAC address was not set in UTA\n",
3947                                 port_id);
3948                         return -EINVAL;
3949                 }
3950
3951                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
3952                 if (index < 0) {
3953                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
3954                                 port_id);
3955                         return -ENOSPC;
3956                 }
3957         }
3958
3959         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
3960         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
3961         if (ret == 0) {
3962                 /* Update address in NIC data structure */
3963                 if (on)
3964                         rte_ether_addr_copy(addr,
3965                                         &dev->data->hash_mac_addrs[index]);
3966                 else
3967                         rte_ether_addr_copy(&null_mac_addr,
3968                                         &dev->data->hash_mac_addrs[index]);
3969         }
3970
3971         return eth_err(port_id, ret);
3972 }
3973
3974 int
3975 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
3976 {
3977         struct rte_eth_dev *dev;
3978
3979         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3980
3981         dev = &rte_eth_devices[port_id];
3982
3983         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
3984         return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
3985                                                                        on));
3986 }
3987
3988 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
3989                                         uint16_t tx_rate)
3990 {
3991         struct rte_eth_dev *dev;
3992         struct rte_eth_dev_info dev_info;
3993         struct rte_eth_link link;
3994         int ret;
3995
3996         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3997
3998         ret = rte_eth_dev_info_get(port_id, &dev_info);
3999         if (ret != 0)
4000                 return ret;
4001
4002         dev = &rte_eth_devices[port_id];
4003         link = dev->data->dev_link;
4004
4005         if (queue_idx >= dev_info.max_tx_queues) {
4006                 RTE_ETHDEV_LOG(ERR,
4007                         "Set queue rate limit: port %u: invalid queue id=%u\n",
4008                         port_id, queue_idx);
4009                 return -EINVAL;
4010         }
4011
4012         if (tx_rate > link.link_speed) {
4013                 RTE_ETHDEV_LOG(ERR,
4014                         "Set queue rate limit: invalid tx_rate=%u, bigger than link speed %u\n",
4015                         tx_rate, link.link_speed);
4016                 return -EINVAL;
4017         }
4018
4019         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
4020         return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
4021                                                         queue_idx, tx_rate));
4022 }
4023
4024 int
4025 rte_eth_mirror_rule_set(uint16_t port_id,
4026                         struct rte_eth_mirror_conf *mirror_conf,
4027                         uint8_t rule_id, uint8_t on)
4028 {
4029         struct rte_eth_dev *dev;
4030
4031         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4032         if (mirror_conf->rule_type == 0) {
4033                 RTE_ETHDEV_LOG(ERR, "Mirror rule type cannot be 0\n");
4034                 return -EINVAL;
4035         }
4036
4037         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
4038                 RTE_ETHDEV_LOG(ERR, "Invalid dst pool, pool id must be 0-%d\n",
4039                         ETH_64_POOLS - 1);
4040                 return -EINVAL;
4041         }
4042
4043         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
4044              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
4045             (mirror_conf->pool_mask == 0)) {
4046                 RTE_ETHDEV_LOG(ERR,
4047                         "Invalid mirror pool, pool mask cannot be 0\n");
4048                 return -EINVAL;
4049         }
4050
4051         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
4052             mirror_conf->vlan.vlan_mask == 0) {
4053                 RTE_ETHDEV_LOG(ERR,
4054                         "Invalid vlan mask, vlan mask cannot be 0\n");
4055                 return -EINVAL;
4056         }
4057
4058         dev = &rte_eth_devices[port_id];
4059         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
4060
4061         return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
4062                                                 mirror_conf, rule_id, on));
4063 }
4064
4065 int
4066 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
4067 {
4068         struct rte_eth_dev *dev;
4069
4070         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4071
4072         dev = &rte_eth_devices[port_id];
4073         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
4074
4075         return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev,
4076                                                                    rule_id));
4077 }
4078
4079 RTE_INIT(eth_dev_init_cb_lists)
4080 {
4081         int i;
4082
4083         for (i = 0; i < RTE_MAX_ETHPORTS; i++)
4084                 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
4085 }
4086
4087 int
4088 rte_eth_dev_callback_register(uint16_t port_id,
4089                         enum rte_eth_event_type event,
4090                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4091 {
4092         struct rte_eth_dev *dev;
4093         struct rte_eth_dev_callback *user_cb;
4094         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
4095         uint16_t last_port;
4096
4097         if (!cb_fn)
4098                 return -EINVAL;
4099
4100         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4101                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
4102                 return -EINVAL;
4103         }
4104
4105         if (port_id == RTE_ETH_ALL) {
4106                 next_port = 0;
4107                 last_port = RTE_MAX_ETHPORTS - 1;
4108         } else {
4109                 next_port = last_port = port_id;
4110         }
4111
4112         rte_spinlock_lock(&rte_eth_dev_cb_lock);
4113
4114         do {
4115                 dev = &rte_eth_devices[next_port];
4116
4117                 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
4118                         if (user_cb->cb_fn == cb_fn &&
4119                                 user_cb->cb_arg == cb_arg &&
4120                                 user_cb->event == event) {
4121                                 break;
4122                         }
4123                 }
4124
4125                 /* create a new callback. */
4126                 if (user_cb == NULL) {
4127                         user_cb = rte_zmalloc("INTR_USER_CALLBACK",
4128                                 sizeof(struct rte_eth_dev_callback), 0);
4129                         if (user_cb != NULL) {
4130                                 user_cb->cb_fn = cb_fn;
4131                                 user_cb->cb_arg = cb_arg;
4132                                 user_cb->event = event;
4133                                 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
4134                                                   user_cb, next);
4135                         } else {
4136                                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
4137                                 rte_eth_dev_callback_unregister(port_id, event,
4138                                                                 cb_fn, cb_arg);
4139                                 return -ENOMEM;
4140                         }
4141
4142                 }
4143         } while (++next_port <= last_port);
4144
4145         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
4146         return 0;
4147 }
4148
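/*
 * Example (illustrative): reacting to link-state-change events on every
 * port. The callback runs in the interrupt thread, so it should stay
 * short.
 *
 *	static int
 *	lsc_event_cb(uint16_t port_id, enum rte_eth_event_type event,
 *		     void *cb_arg, void *ret_param)
 *	{
 *		RTE_SET_USED(event);
 *		RTE_SET_USED(cb_arg);
 *		RTE_SET_USED(ret_param);
 *		printf("link state changed on port %u\n", port_id);
 *		return 0;
 *	}
 *
 *	rte_eth_dev_callback_register(RTE_ETH_ALL, RTE_ETH_EVENT_INTR_LSC,
 *				      lsc_event_cb, NULL);
 */
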
4149 int
4150 rte_eth_dev_callback_unregister(uint16_t port_id,
4151                         enum rte_eth_event_type event,
4152                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4153 {
4154         int ret;
4155         struct rte_eth_dev *dev;
4156         struct rte_eth_dev_callback *cb, *next;
4157         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
4158         uint16_t last_port;
4159
4160         if (!cb_fn)
4161                 return -EINVAL;
4162
4163         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4164                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
4165                 return -EINVAL;
4166         }
4167
4168         if (port_id == RTE_ETH_ALL) {
4169                 next_port = 0;
4170                 last_port = RTE_MAX_ETHPORTS - 1;
4171         } else {
4172                 next_port = last_port = port_id;
4173         }
4174
4175         rte_spinlock_lock(&rte_eth_dev_cb_lock);
4176
4177         do {
4178                 dev = &rte_eth_devices[next_port];
4179                 ret = 0;
4180                 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
4181                      cb = next) {
4182
4183                         next = TAILQ_NEXT(cb, next);
4184
4185                         if (cb->cb_fn != cb_fn || cb->event != event ||
4186                             (cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
4187                                 continue;
4188
4189                         /*
4190                          * if this callback is not executing right now,
4191                          * then remove it.
4192                          */
4193                         if (cb->active == 0) {
4194                                 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
4195                                 rte_free(cb);
4196                         } else {
4197                                 ret = -EAGAIN;
4198                         }
4199                 }
4200         } while (++next_port <= last_port);
4201
4202         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
4203         return ret;
4204 }
4205
4206 int
4207 rte_eth_dev_callback_process(struct rte_eth_dev *dev,
4208         enum rte_eth_event_type event, void *ret_param)
4209 {
4210         struct rte_eth_dev_callback *cb_lst;
4211         struct rte_eth_dev_callback dev_cb;
4212         int rc = 0;
4213
4214         rte_spinlock_lock(&rte_eth_dev_cb_lock);
4215         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
4216                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
4217                         continue;
4218                 dev_cb = *cb_lst;
4219                 cb_lst->active = 1;
4220                 if (ret_param != NULL)
4221                         dev_cb.ret_param = ret_param;
4222
4223                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
4224                 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
4225                                 dev_cb.cb_arg, dev_cb.ret_param);
4226                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
4227                 cb_lst->active = 0;
4228         }
4229         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
4230         return rc;
4231 }
4232
4233 void
4234 rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
4235 {
4236         if (dev == NULL)
4237                 return;
4238
4239         rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);
4240
4241         dev->state = RTE_ETH_DEV_ATTACHED;
4242 }
4243
4244 int
4245 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
4246 {
4247         uint32_t vec;
4248         struct rte_eth_dev *dev;
4249         struct rte_intr_handle *intr_handle;
4250         uint16_t qid;
4251         int rc;
4252
4253         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4254
4255         dev = &rte_eth_devices[port_id];
4256
4257         if (!dev->intr_handle) {
4258                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4259                 return -ENOTSUP;
4260         }
4261
4262         intr_handle = dev->intr_handle;
4263         if (!intr_handle->intr_vec) {
4264                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4265                 return -EPERM;
4266         }
4267
4268         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
4269                 vec = intr_handle->intr_vec[qid];
4270                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4271                 if (rc && rc != -EEXIST) {
4272                         RTE_ETHDEV_LOG(ERR,
4273                                 "p %u q %u rx ctl error op %d epfd %d vec %u\n",
4274                                 port_id, qid, op, epfd, vec);
4275                 }
4276         }
4277
4278         return 0;
4279 }
4280
4281 int
4282 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
4283 {
4284         struct rte_intr_handle *intr_handle;
4285         struct rte_eth_dev *dev;
4286         unsigned int efd_idx;
4287         uint32_t vec;
4288         int fd;
4289
4290         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
4291
4292         dev = &rte_eth_devices[port_id];
4293
4294         if (queue_id >= dev->data->nb_rx_queues) {
4295                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4296                 return -1;
4297         }
4298
4299         if (!dev->intr_handle) {
4300                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4301                 return -1;
4302         }
4303
4304         intr_handle = dev->intr_handle;
4305         if (!intr_handle->intr_vec) {
4306                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4307                 return -1;
4308         }
4309
4310         vec = intr_handle->intr_vec[queue_id];
4311         efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
4312                 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
4313         fd = intr_handle->efds[efd_idx];
4314
4315         return fd;
4316 }
4317
4318 static inline int
4319 eth_dma_mzone_name(char *name, size_t len, uint16_t port_id, uint16_t queue_id,
4320                 const char *ring_name)
4321 {
4322         return snprintf(name, len, "eth_p%d_q%d_%s",
4323                         port_id, queue_id, ring_name);
4324 }
4325
4326 const struct rte_memzone *
4327 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
4328                          uint16_t queue_id, size_t size, unsigned align,
4329                          int socket_id)
4330 {
4331         char z_name[RTE_MEMZONE_NAMESIZE];
4332         const struct rte_memzone *mz;
4333         int rc;
4334
4335         rc = eth_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
4336                         queue_id, ring_name);
4337         if (rc >= RTE_MEMZONE_NAMESIZE) {
4338                 RTE_ETHDEV_LOG(ERR, "ring name too long\n");
4339                 rte_errno = ENAMETOOLONG;
4340                 return NULL;
4341         }
4342
4343         mz = rte_memzone_lookup(z_name);
4344         if (mz) {
4345                 if ((socket_id != SOCKET_ID_ANY && socket_id != mz->socket_id) ||
4346                                 size > mz->len ||
4347                                 ((uintptr_t)mz->addr & (align - 1)) != 0) {
4348                         RTE_ETHDEV_LOG(ERR,
4349                                 "memzone %s does not satisfy the requested attributes\n",
4350                                 mz->name);
4351                         return NULL;
4352                 }
4353
4354                 return mz;
4355         }
4356
4357         return rte_memzone_reserve_aligned(z_name, size, socket_id,
4358                         RTE_MEMZONE_IOVA_CONTIG, align);
4359 }
4360
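/*
 * Example (illustrative, PMD-internal): how a driver's queue setup
 * routine would typically allocate a descriptor ring. "dev",
 * "queue_id", "nb_desc", "socket_id" and struct my_tx_desc are
 * hypothetical names standing in for the driver's own.
 *
 *	const struct rte_memzone *mz;
 *
 *	mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_id,
 *			nb_desc * sizeof(struct my_tx_desc),
 *			RTE_CACHE_LINE_SIZE, socket_id);
 *	if (mz == NULL)
 *		return -ENOMEM;	// rte_errno carries the detail
 */
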
4361 int
4362 rte_eth_dma_zone_free(const struct rte_eth_dev *dev, const char *ring_name,
4363                 uint16_t queue_id)
4364 {
4365         char z_name[RTE_MEMZONE_NAMESIZE];
4366         const struct rte_memzone *mz;
4367         int rc = 0;
4368
4369         rc = eth_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
4370                         queue_id, ring_name);
4371         if (rc >= RTE_MEMZONE_NAMESIZE) {
4372                 RTE_ETHDEV_LOG(ERR, "ring name too long\n");
4373                 return -ENAMETOOLONG;
4374         }
4375
4376         mz = rte_memzone_lookup(z_name);
4377         if (mz)
4378                 rc = rte_memzone_free(mz);
4379         else
4380                 rc = -ENOENT;
4381
4382         return rc;
4383 }
4384
4385 int
4386 rte_eth_dev_create(struct rte_device *device, const char *name,
4387         size_t priv_data_size,
4388         ethdev_bus_specific_init ethdev_bus_specific_init,
4389         void *bus_init_params,
4390         ethdev_init_t ethdev_init, void *init_params)
4391 {
4392         struct rte_eth_dev *ethdev;
4393         int retval;
4394
4395         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL);
4396
4397         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
4398                 ethdev = rte_eth_dev_allocate(name);
4399                 if (!ethdev)
4400                         return -ENODEV;
4401
4402                 if (priv_data_size) {
4403                         ethdev->data->dev_private = rte_zmalloc_socket(
4404                                 name, priv_data_size, RTE_CACHE_LINE_SIZE,
4405                                 device->numa_node);
4406
4407                         if (!ethdev->data->dev_private) {
4408                                 RTE_ETHDEV_LOG(ERR,
4409                                         "failed to allocate private data\n");
4410                                 retval = -ENOMEM;
4411                                 goto probe_failed;
4412                         }
4413                 }
4414         } else {
4415                 ethdev = rte_eth_dev_attach_secondary(name);
4416                 if (!ethdev) {
4417                         RTE_ETHDEV_LOG(ERR,
4418                                 "secondary process attach failed, ethdev doesn't exist\n");
4419                         return -ENODEV;
4420                 }
4421         }
4422
4423         ethdev->device = device;
4424
4425         if (ethdev_bus_specific_init) {
4426                 retval = ethdev_bus_specific_init(ethdev, bus_init_params);
4427                 if (retval) {
4428                         RTE_ETHDEV_LOG(ERR,
4429                                 "ethdev bus specific initialisation failed\n");
4430                         goto probe_failed;
4431                 }
4432         }
4433
4434         retval = ethdev_init(ethdev, init_params);
4435         if (retval) {
4436                 RTE_ETHDEV_LOG(ERR, "ethdev initialisation failed\n");
4437                 goto probe_failed;
4438         }
4439
4440         rte_eth_dev_probing_finish(ethdev);
4441
4442         return retval;
4443
4444 probe_failed:
4445         rte_eth_dev_release_port(ethdev);
4446         return retval;
4447 }
4448
4449 int
4450 rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
4451         ethdev_uninit_t ethdev_uninit)
4452 {
4453         int ret;
4454
4455         ethdev = rte_eth_dev_allocated(ethdev->data->name);
4456         if (!ethdev)
4457                 return -ENODEV;
4458
4459         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL);
4460
4461         ret = ethdev_uninit(ethdev);
4462         if (ret)
4463                 return ret;
4464
4465         return rte_eth_dev_release_port(ethdev);
4466 }
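
/*
 * Usage sketch (bus/driver side, illustrative only): a typical probe and
 * remove pair built on rte_eth_dev_create()/rte_eth_dev_destroy(). All
 * my_* names and the private structure are hypothetical.
 *
 *	static int
 *	my_ethdev_init(struct rte_eth_dev *ethdev, void *init_params)
 *	{
 *		// set up ethdev->dev_ops, MAC data, etc.
 *		return 0;
 *	}
 *
 *	static int
 *	my_probe(struct rte_device *device)
 *	{
 *		return rte_eth_dev_create(device, device->name,
 *				sizeof(struct my_priv), NULL, NULL,
 *				my_ethdev_init, NULL);
 *	}
 *
 *	static int
 *	my_remove(struct rte_device *device)
 *	{
 *		struct rte_eth_dev *ethdev =
 *			rte_eth_dev_allocated(device->name);
 *
 *		if (ethdev == NULL)
 *			return 0;
 *		return rte_eth_dev_destroy(ethdev, my_ethdev_uninit);
 *	}
 */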
4467
4468 int
4469 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
4470                           int epfd, int op, void *data)
4471 {
4472         uint32_t vec;
4473         struct rte_eth_dev *dev;
4474         struct rte_intr_handle *intr_handle;
4475         int rc;
4476
4477         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4478
4479         dev = &rte_eth_devices[port_id];
4480         if (queue_id >= dev->data->nb_rx_queues) {
4481                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4482                 return -EINVAL;
4483         }
4484
4485         if (!dev->intr_handle) {
4486                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4487                 return -ENOTSUP;
4488         }
4489
4490         intr_handle = dev->intr_handle;
4491         if (!intr_handle->intr_vec) {
4492                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4493                 return -EPERM;
4494         }
4495
4496         vec = intr_handle->intr_vec[queue_id];
4497         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4498         if (rc && rc != -EEXIST) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u Rx queue %u: interrupt control failed (op %d epfd %d vec %u)\n",
                        port_id, queue_id, op, epfd, vec);
4502                 return rc;
4503         }
4504
4505         return 0;
4506 }
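
/*
 * Usage sketch (application side, illustrative only): register a queue's
 * Rx interrupt with the calling thread's epoll instance and wait on it,
 * following the pattern used by interrupt-mode applications. Port 0,
 * queue 0 and the 10 ms timeout are arbitrary.
 *
 *	struct rte_epoll_event ev;
 *	int n;
 *
 *	rte_eth_dev_rx_intr_ctl_q(0, 0, RTE_EPOLL_PER_THREAD,
 *			RTE_INTR_EVENT_ADD, NULL);
 *	n = rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, 10);
 *	if (n > 0) {
 *		// the queue has pending traffic; go poll it
 *	}
 */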
4507
4508 int
4509 rte_eth_dev_rx_intr_enable(uint16_t port_id,
4510                            uint16_t queue_id)
4511 {
4512         struct rte_eth_dev *dev;
4513         int ret;
4514
4515         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4516
4517         dev = &rte_eth_devices[port_id];
4518
4519         ret = eth_dev_validate_rx_queue(dev, queue_id);
4520         if (ret != 0)
4521                 return ret;
4522
4523         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
4524         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev,
4525                                                                 queue_id));
4526 }
4527
4528 int
4529 rte_eth_dev_rx_intr_disable(uint16_t port_id,
4530                             uint16_t queue_id)
4531 {
4532         struct rte_eth_dev *dev;
4533         int ret;
4534
4535         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4536
4537         dev = &rte_eth_devices[port_id];
4538
4539         ret = eth_dev_validate_rx_queue(dev, queue_id);
4540         if (ret != 0)
4541                 return ret;
4542
4543         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
4544         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev,
4545                                                                 queue_id));
4546 }
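
/*
 * Usage sketch (application side, illustrative only): the usual
 * interrupt-mode receive loop arms the queue interrupt only when a
 * polling round comes back empty, sleeps in epoll, then returns to pure
 * polling. port_id/queue_id and the burst size are arbitrary.
 *
 *	struct rte_mbuf *pkts[32];
 *	struct rte_epoll_event ev;
 *
 *	if (rte_eth_rx_burst(port_id, queue_id, pkts, 32) == 0) {
 *		rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *		rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
 *		rte_eth_dev_rx_intr_disable(port_id, queue_id);
 *	}
 */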
4547
4549 int
4550 rte_eth_dev_filter_supported(uint16_t port_id,
4551                              enum rte_filter_type filter_type)
4552 {
4553         struct rte_eth_dev *dev;
4554
4555         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4556
4557         dev = &rte_eth_devices[port_id];
4558         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
4559         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
4560                                 RTE_ETH_FILTER_NOP, NULL);
4561 }
4562
4563 int
4564 rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
4565                         enum rte_filter_op filter_op, void *arg)
4566 {
4567         struct rte_eth_dev *dev;
4568
4569         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4570
4571         dev = &rte_eth_devices[port_id];
4572         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
4573         return eth_err(port_id, (*dev->dev_ops->filter_ctrl)(dev, filter_type,
4574                                                              filter_op, arg));
4575 }
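
/*
 * Usage sketch (application side, illustrative only): probe for legacy
 * ntuple filter support before using the filter_ctrl path. This legacy
 * API is superseded by rte_flow for new code.
 *
 *	int supported =
 *		rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_NTUPLE);
 *
 *	if (supported == 0)
 *		printf("port %u supports ntuple filters\n", port_id);
 */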
4576
4577 const struct rte_eth_rxtx_callback *
4578 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
4579                 rte_rx_callback_fn fn, void *user_param)
4580 {
4581 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4582         rte_errno = ENOTSUP;
4583         return NULL;
4584 #endif
4585         struct rte_eth_dev *dev;
4586
4587         /* check input parameters */
4588         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4589                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4590                 rte_errno = EINVAL;
4591                 return NULL;
4592         }
4593         dev = &rte_eth_devices[port_id];
4594         if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
4595                 rte_errno = EINVAL;
4596                 return NULL;
4597         }
4598         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4599
4600         if (cb == NULL) {
4601                 rte_errno = ENOMEM;
4602                 return NULL;
4603         }
4604
4605         cb->fn.rx = fn;
4606         cb->param = user_param;
4607
        rte_spinlock_lock(&rte_eth_rx_cb_lock);
        /* Add the callbacks in FIFO order. */
        struct rte_eth_rxtx_callback *tail =
                rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];

        /* Make sure the callback is fully initialised before it becomes
         * visible: data-plane threads walk this list without taking
         * rte_eth_rx_cb_lock.
         */
        rte_smp_wmb();
        if (!tail) {
                rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
        } else {
                while (tail->next)
                        tail = tail->next;
                tail->next = cb;
        }
4621         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
4622
4623         return cb;
4624 }
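
/*
 * Usage sketch (application side, illustrative only): a callback that
 * counts packets seen on a queue. The callback runs in the context of
 * rte_eth_rx_burst(), so it must be fast and lock-free. The names below
 * are hypothetical.
 *
 *	static uint64_t rx_count;
 *
 *	static uint16_t
 *	count_cb(uint16_t port, uint16_t queue, struct rte_mbuf *pkts[],
 *		uint16_t nb_pkts, uint16_t max_pkts, void *arg)
 *	{
 *		rx_count += nb_pkts;
 *		return nb_pkts;
 *	}
 *
 *	const struct rte_eth_rxtx_callback *cb =
 *		rte_eth_add_rx_callback(port_id, 0, count_cb, NULL);
 */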
4625
4626 const struct rte_eth_rxtx_callback *
4627 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
4628                 rte_rx_callback_fn fn, void *user_param)
4629 {
4630 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4631         rte_errno = ENOTSUP;
4632         return NULL;
4633 #endif
4634         /* check input parameters */
4635         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4636                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4637                 rte_errno = EINVAL;
4638                 return NULL;
4639         }
4640
4641         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4642
4643         if (cb == NULL) {
4644                 rte_errno = ENOMEM;
4645                 return NULL;
4646         }
4647
4648         cb->fn.rx = fn;
4649         cb->param = user_param;
4650
4651         rte_spinlock_lock(&rte_eth_rx_cb_lock);
4652         /* Add the callbacks at first position */
4653         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
4654         rte_smp_wmb();
4655         rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
4656         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
4657
4658         return cb;
4659 }
4660
4661 const struct rte_eth_rxtx_callback *
4662 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
4663                 rte_tx_callback_fn fn, void *user_param)
4664 {
4665 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4666         rte_errno = ENOTSUP;
4667         return NULL;
4668 #endif
4669         struct rte_eth_dev *dev;
4670
4671         /* check input parameters */
4672         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4673                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
4674                 rte_errno = EINVAL;
4675                 return NULL;
4676         }
4677
4678         dev = &rte_eth_devices[port_id];
4679         if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
4680                 rte_errno = EINVAL;
4681                 return NULL;
4682         }
4683
4684         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4685
4686         if (cb == NULL) {
4687                 rte_errno = ENOMEM;
4688                 return NULL;
4689         }
4690
4691         cb->fn.tx = fn;
4692         cb->param = user_param;
4693
        rte_spinlock_lock(&rte_eth_tx_cb_lock);
        /* Add the callbacks in FIFO order. */
        struct rte_eth_rxtx_callback *tail =
                rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];

        /* Make sure the callback is fully initialised before it becomes
         * visible: data-plane threads walk this list without taking
         * rte_eth_tx_cb_lock.
         */
        rte_smp_wmb();
        if (!tail) {
                rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
        } else {
                while (tail->next)
                        tail = tail->next;
                tail->next = cb;
        }
4707         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
4708
4709         return cb;
4710 }
4711
4712 int
4713 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
4714                 const struct rte_eth_rxtx_callback *user_cb)
4715 {
4716 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4717         return -ENOTSUP;
4718 #endif
4719         /* Check input parameters. */
4720         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
4721         if (user_cb == NULL ||
4722                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
4723                 return -EINVAL;
4724
4725         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4726         struct rte_eth_rxtx_callback *cb;
4727         struct rte_eth_rxtx_callback **prev_cb;
4728         int ret = -EINVAL;
4729
4730         rte_spinlock_lock(&rte_eth_rx_cb_lock);
4731         prev_cb = &dev->post_rx_burst_cbs[queue_id];
4732         for (; *prev_cb != NULL; prev_cb = &cb->next) {
4733                 cb = *prev_cb;
4734                 if (cb == user_cb) {
4735                         /* Remove the user cb from the callback list. */
4736                         *prev_cb = cb->next;
4737                         ret = 0;
4738                         break;
4739                 }
4740         }
4741         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
4742
4743         return ret;
4744 }
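
/*
 * Usage sketch (application side, illustrative only). Removal only
 * unlinks the callback; data-plane threads may still be executing it,
 * so the memory must not be freed until they are known to have quiesced
 * (e.g. after the port is stopped or at an application-level
 * synchronisation point).
 *
 *	if (rte_eth_remove_rx_callback(port_id, 0, cb) == 0) {
 *		// wait until no lcore can still reference cb, then:
 *		rte_free((void *)(uintptr_t)cb);
 *	}
 */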
4745
4746 int
4747 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
4748                 const struct rte_eth_rxtx_callback *user_cb)
4749 {
4750 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4751         return -ENOTSUP;
4752 #endif
4753         /* Check input parameters. */
4754         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
4755         if (user_cb == NULL ||
4756                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
4757                 return -EINVAL;
4758
4759         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4760         int ret = -EINVAL;
4761         struct rte_eth_rxtx_callback *cb;
4762         struct rte_eth_rxtx_callback **prev_cb;
4763
4764         rte_spinlock_lock(&rte_eth_tx_cb_lock);
4765         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
4766         for (; *prev_cb != NULL; prev_cb = &cb->next) {
4767                 cb = *prev_cb;
4768                 if (cb == user_cb) {
4769                         /* Remove the user cb from the callback list. */
4770                         *prev_cb = cb->next;
4771                         ret = 0;
4772                         break;
4773                 }
4774         }
4775         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
4776
4777         return ret;
4778 }
4779
4780 int
4781 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4782         struct rte_eth_rxq_info *qinfo)
4783 {
4784         struct rte_eth_dev *dev;
4785
4786         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4787
4788         if (qinfo == NULL)
4789                 return -EINVAL;
4790
4791         dev = &rte_eth_devices[port_id];
4792         if (queue_id >= dev->data->nb_rx_queues) {
4793                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4794                 return -EINVAL;
4795         }
4796
4797         if (dev->data->rx_queues == NULL ||
4798                         dev->data->rx_queues[queue_id] == NULL) {
4799                 RTE_ETHDEV_LOG(ERR,
4800                                "Rx queue %"PRIu16" of device with port_id=%"
4801                                PRIu16" has not been setup\n",
4802                                queue_id, port_id);
4803                 return -EINVAL;
4804         }
4805
4806         if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
4807                 RTE_ETHDEV_LOG(INFO,
4808                         "Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
4809                         queue_id, port_id);
4810                 return -EINVAL;
4811         }
4812
4813         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
4814
4815         memset(qinfo, 0, sizeof(*qinfo));
4816         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
4817         return 0;
4818 }
4819
4820 int
4821 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4822         struct rte_eth_txq_info *qinfo)
4823 {
4824         struct rte_eth_dev *dev;
4825
4826         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4827
4828         if (qinfo == NULL)
4829                 return -EINVAL;
4830
4831         dev = &rte_eth_devices[port_id];
4832         if (queue_id >= dev->data->nb_tx_queues) {
4833                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
4834                 return -EINVAL;
4835         }
4836
4837         if (dev->data->tx_queues == NULL ||
4838                         dev->data->tx_queues[queue_id] == NULL) {
4839                 RTE_ETHDEV_LOG(ERR,
4840                                "Tx queue %"PRIu16" of device with port_id=%"
4841                                PRIu16" has not been setup\n",
4842                                queue_id, port_id);
4843                 return -EINVAL;
4844         }
4845
4846         if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
4847                 RTE_ETHDEV_LOG(INFO,
4848                         "Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
4849                         queue_id, port_id);
4850                 return -EINVAL;
4851         }
4852
4853         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
4854
4855         memset(qinfo, 0, sizeof(*qinfo));
4856         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
4857
4858         return 0;
4859 }
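
/*
 * Usage sketch (application side, illustrative only): query the
 * configuration the driver actually applied to queue 0.
 *
 *	struct rte_eth_rxq_info rx_info;
 *	struct rte_eth_txq_info tx_info;
 *
 *	if (rte_eth_rx_queue_info_get(port_id, 0, &rx_info) == 0)
 *		printf("rxq0: %u descriptors, mempool %s\n",
 *			rx_info.nb_desc, rx_info.mp->name);
 *	if (rte_eth_tx_queue_info_get(port_id, 0, &tx_info) == 0)
 *		printf("txq0: %u descriptors\n", tx_info.nb_desc);
 */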
4860
4861 int
4862 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
4863                           struct rte_eth_burst_mode *mode)
4864 {
4865         struct rte_eth_dev *dev;
4866
4867         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4868
4869         if (mode == NULL)
4870                 return -EINVAL;
4871
4872         dev = &rte_eth_devices[port_id];
4873
4874         if (queue_id >= dev->data->nb_rx_queues) {
4875                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4876                 return -EINVAL;
4877         }
4878
4879         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP);
4880         memset(mode, 0, sizeof(*mode));
4881         return eth_err(port_id,
4882                        dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode));
4883 }
4884
4885 int
4886 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
4887                           struct rte_eth_burst_mode *mode)
4888 {
4889         struct rte_eth_dev *dev;
4890
4891         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4892
4893         if (mode == NULL)
4894                 return -EINVAL;
4895
4896         dev = &rte_eth_devices[port_id];
4897
4898         if (queue_id >= dev->data->nb_tx_queues) {
4899                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
4900                 return -EINVAL;
4901         }
4902
4903         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP);
4904         memset(mode, 0, sizeof(*mode));
4905         return eth_err(port_id,
4906                        dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode));
4907 }
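
/*
 * Usage sketch (application side, illustrative only): report which
 * burst-mode implementation (vector, scalar, ...) the driver selected.
 * Not all drivers implement this; -ENOTSUP must be tolerated.
 *
 *	struct rte_eth_burst_mode mode;
 *
 *	if (rte_eth_rx_burst_mode_get(port_id, 0, &mode) == 0)
 *		printf("port %u rxq0 burst mode: %s\n", port_id, mode.info);
 */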
4908
4909 int
4910 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
4911                              struct rte_ether_addr *mc_addr_set,
4912                              uint32_t nb_mc_addr)
4913 {
4914         struct rte_eth_dev *dev;
4915
4916         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4917
4918         dev = &rte_eth_devices[port_id];
4919         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
4920         return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
4921                                                 mc_addr_set, nb_mc_addr));
4922 }
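
/*
 * Usage sketch (application side, illustrative only): subscribe to two
 * well-known multicast groups; passing a zero-length list clears the
 * filter again. The addresses are examples.
 *
 *	struct rte_ether_addr mc[] = {
 *		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } },
 *		{ .addr_bytes = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x01 } },
 *	};
 *
 *	rte_eth_dev_set_mc_addr_list(port_id, mc, RTE_DIM(mc));
 *	...
 *	rte_eth_dev_set_mc_addr_list(port_id, NULL, 0);
 */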
4923
4924 int
4925 rte_eth_timesync_enable(uint16_t port_id)
4926 {
4927         struct rte_eth_dev *dev;
4928
4929         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4930         dev = &rte_eth_devices[port_id];
4931
4932         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
4933         return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
4934 }
4935
4936 int
4937 rte_eth_timesync_disable(uint16_t port_id)
4938 {
4939         struct rte_eth_dev *dev;
4940
4941         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4942         dev = &rte_eth_devices[port_id];
4943
4944         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
4945         return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
4946 }
4947
4948 int
4949 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
4950                                    uint32_t flags)
4951 {
4952         struct rte_eth_dev *dev;
4953
4954         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4955         dev = &rte_eth_devices[port_id];
4956
4957         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
4958         return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
4959                                 (dev, timestamp, flags));
4960 }
4961
4962 int
4963 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
4964                                    struct timespec *timestamp)
4965 {
4966         struct rte_eth_dev *dev;
4967
4968         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4969         dev = &rte_eth_devices[port_id];
4970
4971         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
4972         return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
4973                                 (dev, timestamp));
4974 }
4975
4976 int
4977 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
4978 {
4979         struct rte_eth_dev *dev;
4980
4981         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4982         dev = &rte_eth_devices[port_id];
4983
4984         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
4985         return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev,
4986                                                                       delta));
4987 }
4988
4989 int
4990 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
4991 {
4992         struct rte_eth_dev *dev;
4993
4994         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4995         dev = &rte_eth_devices[port_id];
4996
4997         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
4998         return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
4999                                                                 timestamp));
5000 }
5001
5002 int
5003 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
5004 {
5005         struct rte_eth_dev *dev;
5006
5007         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5008         dev = &rte_eth_devices[port_id];
5009
5010         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
5011         return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
5012                                                                 timestamp));
5013 }
5014
5015 int
5016 rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
5017 {
5018         struct rte_eth_dev *dev;
5019
5020         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5021         dev = &rte_eth_devices[port_id];
5022
5023         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP);
5024         return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock));
5025 }
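
/*
 * Usage sketch (application side, illustrative only): a PTP-style client
 * enables timesync, reads the device clock, and slews it by a computed
 * offset. The -1500 ns delta is an arbitrary example value.
 *
 *	struct timespec ts;
 *
 *	rte_eth_timesync_enable(port_id);
 *	rte_eth_timesync_read_time(port_id, &ts);
 *	rte_eth_timesync_adjust_time(port_id, -1500);
 */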
5026
5027 int
5028 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
5029 {
5030         struct rte_eth_dev *dev;
5031
5032         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5033
5034         dev = &rte_eth_devices[port_id];
5035         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
5036         return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
5037 }
5038
5039 int
5040 rte_eth_dev_get_eeprom_length(uint16_t port_id)
5041 {
5042         struct rte_eth_dev *dev;
5043
5044         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5045
5046         dev = &rte_eth_devices[port_id];
5047         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
5048         return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
5049 }
5050
5051 int
5052 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
5053 {
5054         struct rte_eth_dev *dev;
5055
5056         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5057
5058         dev = &rte_eth_devices[port_id];
5059         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
5060         return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
5061 }
5062
5063 int
5064 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
5065 {
5066         struct rte_eth_dev *dev;
5067
5068         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5069
5070         dev = &rte_eth_devices[port_id];
5071         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
5072         return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
5073 }
5074
5075 int
5076 rte_eth_dev_get_module_info(uint16_t port_id,
5077                             struct rte_eth_dev_module_info *modinfo)
5078 {
5079         struct rte_eth_dev *dev;
5080
5081         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5082
5083         dev = &rte_eth_devices[port_id];
5084         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP);
5085         return (*dev->dev_ops->get_module_info)(dev, modinfo);
5086 }
5087
5088 int
5089 rte_eth_dev_get_module_eeprom(uint16_t port_id,
5090                               struct rte_dev_eeprom_info *info)
5091 {
5092         struct rte_eth_dev *dev;
5093
5094         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5095
5096         dev = &rte_eth_devices[port_id];
5097         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP);
5098         return (*dev->dev_ops->get_module_eeprom)(dev, info);
5099 }
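
/*
 * Usage sketch (application side, illustrative only): dump a plugged
 * module's EEPROM, ethtool-style. get_module_info reports the map type
 * and size; the second call reads the raw bytes.
 *
 *	struct rte_eth_dev_module_info minfo;
 *	struct rte_dev_eeprom_info einfo;
 *
 *	if (rte_eth_dev_get_module_info(port_id, &minfo) == 0) {
 *		memset(&einfo, 0, sizeof(einfo));
 *		einfo.offset = 0;
 *		einfo.length = minfo.eeprom_len;
 *		einfo.data = calloc(1, minfo.eeprom_len);
 *		if (einfo.data != NULL)
 *			rte_eth_dev_get_module_eeprom(port_id, &einfo);
 *	}
 */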
5100
5101 int
5102 rte_eth_dev_get_dcb_info(uint16_t port_id,
5103                              struct rte_eth_dcb_info *dcb_info)
5104 {
5105         struct rte_eth_dev *dev;
5106
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        if (dcb_info == NULL)
                return -EINVAL;

        dev = &rte_eth_devices[port_id];
        memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
5111
5112         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
5113         return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
5114 }
5115
5116 int
5117 rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
5118                                     struct rte_eth_l2_tunnel_conf *l2_tunnel)
5119 {
5120         struct rte_eth_dev *dev;
5121
5122         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5123         if (l2_tunnel == NULL) {
5124                 RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n");
5125                 return -EINVAL;
5126         }
5127
5128         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
5129                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
5130                 return -EINVAL;
5131         }
5132
5133         dev = &rte_eth_devices[port_id];
5134         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
5135                                 -ENOTSUP);
5136         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev,
5137                                                                 l2_tunnel));
5138 }
5139
5140 int
5141 rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
5142                                   struct rte_eth_l2_tunnel_conf *l2_tunnel,
5143                                   uint32_t mask,
5144                                   uint8_t en)
5145 {
5146         struct rte_eth_dev *dev;
5147
5148         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5149
5150         if (l2_tunnel == NULL) {
5151                 RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n");
5152                 return -EINVAL;
5153         }
5154
5155         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
5156                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
5157                 return -EINVAL;
5158         }
5159
5160         if (mask == 0) {
5161                 RTE_ETHDEV_LOG(ERR, "Mask should have a value\n");
5162                 return -EINVAL;
5163         }
5164
5165         dev = &rte_eth_devices[port_id];
5166         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
5167                                 -ENOTSUP);
5168         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_offload_set)(dev,
5169                                                         l2_tunnel, mask, en));
5170 }
5171
5172 static void
5173 rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc,
5174                            const struct rte_eth_desc_lim *desc_lim)
5175 {
5176         if (desc_lim->nb_align != 0)
5177                 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
5178
5179         if (desc_lim->nb_max != 0)
5180                 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
5181
5182         *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
5183 }
5184
5185 int
5186 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
5187                                  uint16_t *nb_rx_desc,
5188                                  uint16_t *nb_tx_desc)
5189 {
5190         struct rte_eth_dev_info dev_info;
5191         int ret;
5192
5193         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5194
5195         ret = rte_eth_dev_info_get(port_id, &dev_info);
5196         if (ret != 0)
5197                 return ret;
5198
5199         if (nb_rx_desc != NULL)
5200                 rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
5201
5202         if (nb_tx_desc != NULL)
5203                 rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
5204
5205         return 0;
5206 }
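
/*
 * Usage sketch (application side, illustrative only): clamp the
 * requested ring sizes to the device limits before queue setup, as the
 * DPDK sample applications do. 1024 is an arbitrary starting request;
 * socket_id and mb_pool are assumed to be set up elsewhere.
 *
 *	uint16_t nb_rxd = 1024, nb_txd = 1024;
 *
 *	rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
 *	rte_eth_rx_queue_setup(port_id, 0, nb_rxd, socket_id, NULL, mb_pool);
 *	rte_eth_tx_queue_setup(port_id, 0, nb_txd, socket_id, NULL);
 */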
5207
5208 int
5209 rte_eth_dev_hairpin_capability_get(uint16_t port_id,
5210                                    struct rte_eth_hairpin_cap *cap)
5211 {
5212         struct rte_eth_dev *dev;
5213
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        if (cap == NULL)
                return -EINVAL;

        dev = &rte_eth_devices[port_id];
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP);
        memset(cap, 0, sizeof(*cap));
5219         return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap));
5220 }
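
/*
 * Usage sketch (application side, illustrative only): check for hairpin
 * support before attempting hairpin queue setup.
 *
 *	struct rte_eth_hairpin_cap cap;
 *
 *	if (rte_eth_dev_hairpin_capability_get(port_id, &cap) == 0 &&
 *	    cap.max_nb_queues > 0) {
 *		// the port supports hairpin queues
 *	}
 */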
5221
5222 int
5223 rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
5224 {
5225         if (dev->data->rx_queue_state[queue_id] ==
5226             RTE_ETH_QUEUE_STATE_HAIRPIN)
5227                 return 1;
5228         return 0;
5229 }
5230
5231 int
5232 rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
5233 {
5234         if (dev->data->tx_queue_state[queue_id] ==
5235             RTE_ETH_QUEUE_STATE_HAIRPIN)
5236                 return 1;
5237         return 0;
5238 }
5239
5240 int
5241 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
5242 {
5243         struct rte_eth_dev *dev;
5244
5245         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5246
5247         if (pool == NULL)
5248                 return -EINVAL;
5249
5250         dev = &rte_eth_devices[port_id];
5251
5252         if (*dev->dev_ops->pool_ops_supported == NULL)
5253                 return 1; /* all pools are supported */
5254
5255         return (*dev->dev_ops->pool_ops_supported)(dev, pool);
5256 }
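
/*
 * Usage sketch (application side, illustrative only): pick a mempool ops
 * name the port can use; a negative return means the driver cannot work
 * with that ops. "ring_mp_mc" is the default ops name.
 *
 *	const char *ops = "ring_mp_mc";
 *
 *	if (rte_eth_dev_pool_ops_supported(port_id, ops) < 0)
 *		ops = rte_mbuf_best_mempool_ops();
 */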
5257
5258 /**
5259  * A set of values to describe the possible states of a switch domain.
5260  */
5261 enum rte_eth_switch_domain_state {
5262         RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
5263         RTE_ETH_SWITCH_DOMAIN_ALLOCATED
5264 };
5265
5266 /**
5267  * Array of switch domains available for allocation. Array is sized to
5268  * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
5269  * ethdev ports in a single process.
5270  */
5271 static struct rte_eth_dev_switch {
5272         enum rte_eth_switch_domain_state state;
5273 } rte_eth_switch_domains[RTE_MAX_ETHPORTS];
5274
5275 int
5276 rte_eth_switch_domain_alloc(uint16_t *domain_id)
5277 {
5278         unsigned int i;
5279
5280         *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
5281
5282         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
5283                 if (rte_eth_switch_domains[i].state ==
5284                         RTE_ETH_SWITCH_DOMAIN_UNUSED) {
5285                         rte_eth_switch_domains[i].state =
5286                                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
5287                         *domain_id = i;
5288                         return 0;
5289                 }
5290         }
5291
5292         return -ENOSPC;
5293 }
5294
5295 int
5296 rte_eth_switch_domain_free(uint16_t domain_id)
5297 {
5298         if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
5299                 domain_id >= RTE_MAX_ETHPORTS)
5300                 return -EINVAL;
5301
5302         if (rte_eth_switch_domains[domain_id].state !=
5303                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
5304                 return -EINVAL;
5305
5306         rte_eth_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;
5307
5308         return 0;
5309 }
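
/*
 * Usage sketch (driver side, illustrative only): a PF driver allocates
 * one switch domain at probe time and shares the id with the port
 * representors it creates; the domain is released on removal.
 *
 *	uint16_t domain_id;
 *
 *	if (rte_eth_switch_domain_alloc(&domain_id) != 0)
 *		return -ENOSPC;
 *	// store domain_id in the shared device state; representors
 *	// report it via dev_info switch_info.domain_id
 *	...
 *	rte_eth_switch_domain_free(domain_id);
 */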
5310
5311 static int
5312 rte_eth_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
5313 {
5314         int state;
5315         struct rte_kvargs_pair *pair;
5316         char *letter;
5317
5318         arglist->str = strdup(str_in);
5319         if (arglist->str == NULL)
5320                 return -ENOMEM;
5321
5322         letter = arglist->str;
5323         state = 0;
5324         arglist->count = 0;
5325         pair = &arglist->pairs[0];
5326         while (1) {
5327                 switch (state) {
5328                 case 0: /* Initial */
5329                         if (*letter == '=')
5330                                 return -EINVAL;
5331                         else if (*letter == '\0')
5332                                 return 0;
5333
5334                         state = 1;
5335                         pair->key = letter;
5336                         /* fall-thru */
5337
5338                 case 1: /* Parsing key */
5339                         if (*letter == '=') {
5340                                 *letter = '\0';
5341                                 pair->value = letter + 1;
5342                                 state = 2;
5343                         } else if (*letter == ',' || *letter == '\0')
5344                                 return -EINVAL;
5345                         break;
5346
5348                 case 2: /* Parsing value */
5349                         if (*letter == '[')
5350                                 state = 3;
5351                         else if (*letter == ',') {
5352                                 *letter = '\0';
5353                                 arglist->count++;
5354                                 pair = &arglist->pairs[arglist->count];
5355                                 state = 0;
5356                         } else if (*letter == '\0') {
5357                                 letter--;
5358                                 arglist->count++;
5359                                 pair = &arglist->pairs[arglist->count];
5360                                 state = 0;
5361                         }
5362                         break;
5363
5364                 case 3: /* Parsing list */
5365                         if (*letter == ']')
5366                                 state = 2;
5367                         else if (*letter == '\0')
5368                                 return -EINVAL;
5369                         break;
5370                 }
5371                 letter++;
5372         }
5373 }
5374
5375 int
5376 rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
5377 {
5378         struct rte_kvargs args;
5379         struct rte_kvargs_pair *pair;
5380         unsigned int i;
5381         int result = 0;
5382
5383         memset(eth_da, 0, sizeof(*eth_da));
5384
5385         result = rte_eth_devargs_tokenise(&args, dargs);
5386         if (result < 0)
5387                 goto parse_cleanup;
5388
5389         for (i = 0; i < args.count; i++) {
5390                 pair = &args.pairs[i];
5391                 if (strcmp("representor", pair->key) == 0) {
5392                         result = rte_eth_devargs_parse_list(pair->value,
5393                                 rte_eth_devargs_parse_representor_ports,
5394                                 eth_da);
5395                         if (result < 0)
5396                                 goto parse_cleanup;
5397                 }
5398         }
5399
5400 parse_cleanup:
5401         if (args.str)
5402                 free(args.str);
5403
5404         return result;
5405 }
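
/*
 * Usage sketch (driver side, illustrative only): parse the representor
 * list out of a device argument string such as "representor=[0-3]".
 * create_representor() is a hypothetical driver helper.
 *
 *	struct rte_eth_devargs eth_da;
 *	uint16_t i;
 *
 *	if (rte_eth_devargs_parse("representor=[0-3]", &eth_da) == 0)
 *		for (i = 0; i < eth_da.nb_representor_ports; i++)
 *			create_representor(eth_da.representor_ports[i]);
 */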
5406
5407 static int
5408 handle_port_list(const char *cmd __rte_unused,
5409                 const char *params __rte_unused,
5410                 struct rte_tel_data *d)
5411 {
5412         int port_id;
5413
5414         rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
5415         RTE_ETH_FOREACH_DEV(port_id)
5416                 rte_tel_data_add_array_int(d, port_id);
5417         return 0;
5418 }
5419
5420 static void
5421 add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats,
5422                 const char *stat_name)
5423 {
        int q;
        struct rte_tel_data *q_data = rte_tel_data_alloc();

        if (q_data == NULL)
                return;
        rte_tel_data_start_array(q_data, RTE_TEL_U64_VAL);
5427         for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++)
5428                 rte_tel_data_add_array_u64(q_data, q_stats[q]);
5429         rte_tel_data_add_dict_container(d, stat_name, q_data, 0);
5430 }
5431
5432 #define ADD_DICT_STAT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s)
5433
5434 static int
5435 handle_port_stats(const char *cmd __rte_unused,
5436                 const char *params,
5437                 struct rte_tel_data *d)
5438 {
        struct rte_eth_stats stats;
        int port_id, ret;
        char *end_param;

        if (params == NULL || strlen(params) == 0 || !isdigit(*params))
                return -1;

        port_id = strtoul(params, &end_param, 0);
        if (*end_param != '\0')
                RTE_ETHDEV_LOG(NOTICE,
                        "Extra parameters passed to ethdev telemetry command, ignoring\n");
        if (!rte_eth_dev_is_valid_port(port_id))
                return -1;
5448
5449         ret = rte_eth_stats_get(port_id, &stats);
5450         if (ret < 0)
5451                 return -1;
5452
5453         rte_tel_data_start_dict(d);
5454         ADD_DICT_STAT(stats, ipackets);
5455         ADD_DICT_STAT(stats, opackets);
5456         ADD_DICT_STAT(stats, ibytes);
5457         ADD_DICT_STAT(stats, obytes);
5458         ADD_DICT_STAT(stats, imissed);
5459         ADD_DICT_STAT(stats, ierrors);
5460         ADD_DICT_STAT(stats, oerrors);
5461         ADD_DICT_STAT(stats, rx_nombuf);
5462         add_port_queue_stats(d, stats.q_ipackets, "q_ipackets");
5463         add_port_queue_stats(d, stats.q_opackets, "q_opackets");
5464         add_port_queue_stats(d, stats.q_ibytes, "q_ibytes");
5465         add_port_queue_stats(d, stats.q_obytes, "q_obytes");
5466         add_port_queue_stats(d, stats.q_errors, "q_errors");
5467
5468         return 0;
5469 }
5470
5471 static int
5472 handle_port_xstats(const char *cmd __rte_unused,
5473                 const char *params,
5474                 struct rte_tel_data *d)
5475 {
5476         struct rte_eth_xstat *eth_xstats;
5477         struct rte_eth_xstat_name *xstat_names;
5478         int port_id, num_xstats;
5479         int i, ret;
5480         char *end_param;
5481
5482         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5483                 return -1;
5484
5485         port_id = strtoul(params, &end_param, 0);
5486         if (*end_param != '\0')
                RTE_ETHDEV_LOG(NOTICE,
                        "Extra parameters passed to ethdev telemetry command, ignoring\n");
5489         if (!rte_eth_dev_is_valid_port(port_id))
5490                 return -1;
5491
5492         num_xstats = rte_eth_xstats_get(port_id, NULL, 0);
5493         if (num_xstats < 0)
5494                 return -1;
5495
5496         /* use one malloc for both names and stats */
5497         eth_xstats = malloc((sizeof(struct rte_eth_xstat) +
5498                         sizeof(struct rte_eth_xstat_name)) * num_xstats);
5499         if (eth_xstats == NULL)
5500                 return -1;
5501         xstat_names = (void *)&eth_xstats[num_xstats];
5502
5503         ret = rte_eth_xstats_get_names(port_id, xstat_names, num_xstats);
5504         if (ret < 0 || ret > num_xstats) {
5505                 free(eth_xstats);
5506                 return -1;
5507         }
5508
5509         ret = rte_eth_xstats_get(port_id, eth_xstats, num_xstats);
5510         if (ret < 0 || ret > num_xstats) {
5511                 free(eth_xstats);
5512                 return -1;
5513         }
5514
5515         rte_tel_data_start_dict(d);
5516         for (i = 0; i < num_xstats; i++)
5517                 rte_tel_data_add_dict_u64(d, xstat_names[i].name,
5518                                 eth_xstats[i].value);
5519         return 0;
5520 }
5521
5522 static int
5523 handle_port_link_status(const char *cmd __rte_unused,
5524                 const char *params,
5525                 struct rte_tel_data *d)
5526 {
5527         static const char *status_str = "status";
5528         int ret, port_id;
5529         struct rte_eth_link link;
5530         char *end_param;
5531
5532         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5533                 return -1;
5534
5535         port_id = strtoul(params, &end_param, 0);
5536         if (*end_param != '\0')
                RTE_ETHDEV_LOG(NOTICE,
                        "Extra parameters passed to ethdev telemetry command, ignoring\n");
5539         if (!rte_eth_dev_is_valid_port(port_id))
5540                 return -1;
5541
5542         ret = rte_eth_link_get(port_id, &link);
5543         if (ret < 0)
5544                 return -1;
5545
5546         rte_tel_data_start_dict(d);
5547         if (!link.link_status) {
5548                 rte_tel_data_add_dict_string(d, status_str, "DOWN");
5549                 return 0;
5550         }
5551         rte_tel_data_add_dict_string(d, status_str, "UP");
5552         rte_tel_data_add_dict_u64(d, "speed", link.link_speed);
5553         rte_tel_data_add_dict_string(d, "duplex",
5554                         (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
5555                                 "full-duplex" : "half-duplex");
5556         return 0;
5557 }
5558
5559 RTE_LOG_REGISTER(rte_eth_dev_logtype, lib.ethdev, INFO);
5560
5561 RTE_INIT(ethdev_init_telemetry)
5562 {
5563         rte_telemetry_register_cmd("/ethdev/list", handle_port_list,
5564                         "Returns list of available ethdev ports. Takes no parameters");
5565         rte_telemetry_register_cmd("/ethdev/stats", handle_port_stats,
5566                         "Returns the common stats for a port. Parameters: int port_id");
5567         rte_telemetry_register_cmd("/ethdev/xstats", handle_port_xstats,
5568                         "Returns the extended stats for a port. Parameters: int port_id");
5569         rte_telemetry_register_cmd("/ethdev/link_status",
5570                         handle_port_link_status,
5571                         "Returns the link status for a port. Parameters: int port_id");
5572 }
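
/*
 * Usage sketch (client side, illustrative only): the commands above are
 * reachable over the telemetry socket, e.g. with the bundled client:
 *
 *	$ ./usertools/dpdk-telemetry.py
 *	--> /ethdev/list
 *	{"/ethdev/list": [0, 1]}
 *	--> /ethdev/stats,0
 *	{"/ethdev/stats": {"ipackets": 0, ...}}
 *
 * The exact JSON contents depend on the running application and ports.
 */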