ethdev: extract checking queue id into common functions
dpdk.git: lib/librte_ethdev/rte_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_kvargs.h>
#include <rte_class.h>
#include <rte_ether.h>
#include <rte_telemetry.h>

#include "rte_ethdev_trace.h"
#include "rte_ethdev.h"
#include "rte_ethdev_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove rx callbacks */
static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove tx callbacks */
static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for shared data allocation */
static rte_spinlock_t rte_eth_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

/* Shared memory between primary and secondary processes. */
static struct {
	uint64_t next_owner_id;
	rte_spinlock_t ownership_lock;
	struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
} *rte_eth_dev_shared_data;

static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
		rx_nombuf)},
};

#define RTE_NB_STATS RTE_DIM(rte_stats_strings)

static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS RTE_DIM(rte_rxq_stats_strings)

static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS RTE_DIM(rte_txq_stats_strings)

#define RTE_RX_OFFLOAD_BIT2STR(_name)	\
	{ DEV_RX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} rte_rx_offload_names[] = {
	RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
	RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
	RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
	RTE_RX_OFFLOAD_BIT2STR(SCATTER),
	RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
	RTE_RX_OFFLOAD_BIT2STR(SECURITY),
	RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
	RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
};

#undef RTE_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name)	\
	{ DEV_TX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} rte_tx_offload_names[] = {
	RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
	RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
	RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
	RTE_TX_OFFLOAD_BIT2STR(SECURITY),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
};

#undef RTE_TX_OFFLOAD_BIT2STR

/**
 * The user application callback description.
 *
 * It contains the callback address to be registered by the user application,
 * the pointer to the parameters for the callback, and the event type.
 */
struct rte_eth_dev_callback {
	TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
	rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
	void *cb_arg;                           /**< Parameter for callback */
	void *ret_param;                        /**< Return parameter */
	enum rte_eth_event_type event;          /**< Interrupt event type */
	uint32_t active;                        /**< Callback is executing */
};

enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};

int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
	int ret;
	struct rte_devargs devargs = {.args = NULL};
	const char *bus_param_key;
	char *bus_str = NULL;
	char *cls_str = NULL;
	int str_size;

	memset(iter, 0, sizeof(*iter));

	/*
	 * The devargs string may use various syntaxes:
	 *   - 0000:08:00.0,representor=[1-3]
	 *   - pci:0000:06:00.0,representor=[0,5]
	 *   - class=eth,mac=00:11:22:33:44:55
	 * A new syntax is in development (not yet supported):
	 *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
	 */

	/*
	 * Handle a pure class filter (i.e. without any bus-level argument),
	 * from the future new syntax.
	 * rte_devargs_parse() does not support the new syntax yet,
	 * which is why this simple case is temporarily parsed here.
	 */
#define iter_anybus_str "class=eth,"
	if (strncmp(devargs_str, iter_anybus_str,
			strlen(iter_anybus_str)) == 0) {
		iter->cls_str = devargs_str + strlen(iter_anybus_str);
		goto end;
	}

	/* Split bus, device and parameters. */
	ret = rte_devargs_parse(&devargs, devargs_str);
	if (ret != 0)
		goto error;

	/*
	 * Assume parameters of the old syntax can match only at ethdev level.
	 * Extra parameters will be ignored, thanks to the "+" prefix.
	 */
	str_size = strlen(devargs.args) + 2;
	cls_str = malloc(str_size);
	if (cls_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(cls_str, str_size, "+%s", devargs.args);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->cls_str = cls_str;
	free(devargs.args); /* allocated by rte_devargs_parse() */
	devargs.args = NULL;

	iter->bus = devargs.bus;
	if (iter->bus->dev_iterate == NULL) {
		ret = -ENOTSUP;
		goto error;
	}

	/* Convert bus args to the new syntax for use with the new API dev_iterate. */
	if (strcmp(iter->bus->name, "vdev") == 0) {
		bus_param_key = "name";
	} else if (strcmp(iter->bus->name, "pci") == 0) {
		bus_param_key = "addr";
	} else {
		ret = -ENOTSUP;
		goto error;
	}
	str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
	bus_str = malloc(str_size);
	if (bus_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(bus_str, str_size, "%s=%s",
			bus_param_key, devargs.name);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->bus_str = bus_str;

end:
	iter->cls = rte_class_find_by_name("eth");
	return 0;

error:
	if (ret == -ENOTSUP)
		RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n",
				iter->bus->name);
	free(devargs.args);
	free(bus_str);
	free(cls_str);
	return ret;
}

uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
	if (iter->cls == NULL) /* invalid ethdev iterator */
		return RTE_MAX_ETHPORTS;

	do { /* loop to try all matching rte_device */
		/* If not a pure ethdev filter and */
		if (iter->bus != NULL &&
				/* not in the middle of rte_eth_dev iteration, */
				iter->class_device == NULL) {
			/* get the next rte_device to try. */
			iter->device = iter->bus->dev_iterate(
					iter->device, iter->bus_str, iter);
			if (iter->device == NULL)
				break; /* no more rte_device candidates */
		}
		/* A device matches the bus part; check the ethdev part. */
		iter->class_device = iter->cls->dev_iterate(
				iter->class_device, iter->cls_str, iter);
		if (iter->class_device != NULL)
			return eth_dev_to_id(iter->class_device); /* match */
	} while (iter->bus != NULL); /* need to try the next rte_device */

	/* No more ethdev ports to iterate. */
	rte_eth_iterator_cleanup(iter);
	return RTE_MAX_ETHPORTS;
}

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
	if (iter->bus_str == NULL)
		return; /* nothing to free in pure class filter */
	free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
	free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
	memset(iter, 0, sizeof(*iter));
}
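
/*
 * Illustrative usage sketch (editor's note, not part of this file): the three
 * iterator functions above are normally driven together, e.g. to list all
 * ports matching a devargs filter. A minimal sketch, assuming EAL is already
 * initialized:
 *
 * @code{.c}
 * struct rte_dev_iterator it;
 * uint16_t pid;
 *
 * if (rte_eth_iterator_init(&it, "class=eth,mac=00:11:22:33:44:55") == 0) {
 *	for (pid = rte_eth_iterator_next(&it);
 *	     pid != RTE_MAX_ETHPORTS;
 *	     pid = rte_eth_iterator_next(&it))
 *		printf("matching port: %u\n", pid);
 *	// cleanup runs internally once iteration is exhausted; call
 *	// rte_eth_iterator_cleanup(&it) only when breaking out early.
 * }
 * @endcode
 */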

uint16_t
rte_eth_find_next(uint16_t port_id)
{
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
		port_id++;

	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;

	return port_id;
}

/*
 * Macro to iterate over all valid ports for internal usage.
 * Note: RTE_ETH_FOREACH_DEV is different because it filters out owned ports.
 */
#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
	for (port_id = rte_eth_find_next(0); \
	     port_id < RTE_MAX_ETHPORTS; \
	     port_id = rte_eth_find_next(port_id + 1))

uint16_t
rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].device != parent)
		port_id = rte_eth_find_next(port_id + 1);

	return port_id;
}

uint16_t
rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
	return rte_eth_find_next_of(port_id,
			rte_eth_devices[ref_port_id].device);
}

static void
rte_eth_dev_shared_data_prepare(void)
{
	const unsigned int flags = 0;
	const struct rte_memzone *mz;

	rte_spinlock_lock(&rte_eth_shared_data_lock);

	if (rte_eth_dev_shared_data == NULL) {
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			/* Allocate port data and ownership shared memory. */
			mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
					sizeof(*rte_eth_dev_shared_data),
					rte_socket_id(), flags);
		} else
			mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
		if (mz == NULL)
			rte_panic("Cannot allocate ethdev shared data\n");

		rte_eth_dev_shared_data = mz->addr;
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			rte_eth_dev_shared_data->next_owner_id =
					RTE_ETH_DEV_NO_OWNER + 1;
			rte_spinlock_init(&rte_eth_dev_shared_data->ownership_lock);
			memset(rte_eth_dev_shared_data->data, 0,
			       sizeof(rte_eth_dev_shared_data->data));
		}
	}

	rte_spinlock_unlock(&rte_eth_shared_data_lock);
}

static bool
is_allocated(const struct rte_eth_dev *ethdev)
{
	return ethdev->data->name[0] != '\0';
}

static struct rte_eth_dev *
_rte_eth_dev_allocated(const char *name)
{
	unsigned int i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (rte_eth_devices[i].data != NULL &&
		    strcmp(rte_eth_devices[i].data->name, name) == 0)
			return &rte_eth_devices[i];
	}
	return NULL;
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
	struct rte_eth_dev *ethdev;

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	ethdev = _rte_eth_dev_allocated(name);

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

	return ethdev;
}

static uint16_t
rte_eth_dev_find_free_port(void)
{
	unsigned int i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		/* Use the shared name field to find a free port. */
		if (rte_eth_dev_shared_data->data[i].name[0] == '\0') {
			RTE_ASSERT(rte_eth_devices[i].state ==
				   RTE_ETH_DEV_UNUSED);
			return i;
		}
	}
	return RTE_MAX_ETHPORTS;
}

static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
{
	struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

	eth_dev->data = &rte_eth_dev_shared_data->data[port_id];

	return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
	uint16_t port_id;
	struct rte_eth_dev *eth_dev = NULL;
	size_t name_len;

	name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
	if (name_len == 0) {
		RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n");
		return NULL;
	}

	if (name_len >= RTE_ETH_NAME_MAX_LEN) {
		RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n");
		return NULL;
	}

	rte_eth_dev_shared_data_prepare();

	/* Synchronize port creation between primary and secondary threads. */
	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	if (_rte_eth_dev_allocated(name) != NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Ethernet device with name %s already allocated\n",
			name);
		goto unlock;
	}

	port_id = rte_eth_dev_find_free_port();
	if (port_id == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG(ERR,
			"Reached maximum number of Ethernet ports\n");
		goto unlock;
	}

	eth_dev = eth_dev_get(port_id);
	strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
	eth_dev->data->port_id = port_id;
	eth_dev->data->mtu = RTE_ETHER_MTU;
	pthread_mutex_init(&eth_dev->data->flow_ops_mutex, NULL);

unlock:
	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

	return eth_dev;
}
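
/*
 * Illustrative PMD-side sketch (editor's note; hypothetical driver code, not
 * from this file): a driver probe routine typically pairs the allocation
 * above with rte_eth_dev_probing_finish(), and rte_eth_dev_release_port()
 * on removal. 'example_dev_ops' is an assumed ops table:
 *
 * @code{.c}
 * struct rte_eth_dev *eth_dev = rte_eth_dev_allocate("net_example0");
 *
 * if (eth_dev == NULL)
 *	return -ENOMEM;
 * eth_dev->dev_ops = &example_dev_ops;
 * rte_eth_dev_probing_finish(eth_dev);
 * @endcode
 */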

/*
 * Attach to a port already registered by the primary process, which
 * ensures that the same device gets the same port ID in both the
 * primary and secondary processes.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
	uint16_t i;
	struct rte_eth_dev *eth_dev = NULL;

	rte_eth_dev_shared_data_prepare();

	/* Synchronize port attachment to primary port creation and release. */
	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (strcmp(rte_eth_dev_shared_data->data[i].name, name) == 0)
			break;
	}
	if (i == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG(ERR,
			"Device %s is not driven by the primary process\n",
			name);
	} else {
		eth_dev = eth_dev_get(i);
		RTE_ASSERT(eth_dev->data->port_id == i);
	}

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
	return eth_dev;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
	if (eth_dev == NULL)
		return -EINVAL;

	rte_eth_dev_shared_data_prepare();

	if (eth_dev->state != RTE_ETH_DEV_UNUSED)
		rte_eth_dev_callback_process(eth_dev,
				RTE_ETH_EVENT_DESTROY, NULL);

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	eth_dev->state = RTE_ETH_DEV_UNUSED;
	eth_dev->device = NULL;
	eth_dev->intr_handle = NULL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_free(eth_dev->data->rx_queues);
		rte_free(eth_dev->data->tx_queues);
		rte_free(eth_dev->data->mac_addrs);
		rte_free(eth_dev->data->hash_mac_addrs);
		rte_free(eth_dev->data->dev_private);
		pthread_mutex_destroy(&eth_dev->data->flow_ops_mutex);
		memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
	}

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

	return 0;
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
	if (port_id >= RTE_MAX_ETHPORTS ||
	    (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
		return 0;
	else
		return 1;
}

static int
rte_eth_is_valid_owner_id(uint64_t owner_id)
{
	if (owner_id == RTE_ETH_DEV_NO_OWNER ||
	    rte_eth_dev_shared_data->next_owner_id <= owner_id)
		return 0;
	return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].data->owner.id != owner_id)
		port_id = rte_eth_find_next(port_id + 1);

	return port_id;
}

int
rte_eth_dev_owner_new(uint64_t *owner_id)
{
	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	*owner_id = rte_eth_dev_shared_data->next_owner_id++;

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
	return 0;
}

static int
_rte_eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
		       const struct rte_eth_dev_owner *new_owner)
{
	struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
	struct rte_eth_dev_owner *port_owner;

	if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (!rte_eth_is_valid_owner_id(new_owner->id) &&
	    !rte_eth_is_valid_owner_id(old_owner_id)) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
			old_owner_id, new_owner->id);
		return -EINVAL;
	}

	port_owner = &rte_eth_devices[port_id].data->owner;
	if (port_owner->id != old_owner_id) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
			port_id, port_owner->name, port_owner->id);
		return -EPERM;
	}

	/* cannot truncate (same structure) */
	strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);

	port_owner->id = new_owner->id;

	RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
		port_id, new_owner->name, new_owner->id);

	return 0;
}

int
rte_eth_dev_owner_set(const uint16_t port_id,
		      const struct rte_eth_dev_owner *owner)
{
	int ret;

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	ret = _rte_eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
	return ret;
}

int
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
	const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
			{.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
	int ret;

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	ret = _rte_eth_dev_owner_set(port_id, owner_id, &new_owner);

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
	return ret;
}

int
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
	uint16_t port_id;
	int ret = 0;

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	if (rte_eth_is_valid_owner_id(owner_id)) {
		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
			if (rte_eth_devices[port_id].data->owner.id == owner_id)
				memset(&rte_eth_devices[port_id].data->owner, 0,
				       sizeof(struct rte_eth_dev_owner));
		RTE_ETHDEV_LOG(NOTICE,
			"All port owners owned by %016"PRIx64" identifier have been removed\n",
			owner_id);
	} else {
		RTE_ETHDEV_LOG(ERR,
			       "Invalid owner id=%016"PRIx64"\n",
			       owner_id);
		ret = -EINVAL;
	}

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

	return ret;
}

int
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
	int ret = 0;
	struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
			port_id);
		ret = -ENODEV;
	} else {
		rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
	}

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
	return ret;
}
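
/*
 * Illustrative ownership workflow (editor's note; a minimal sketch, assuming
 * a valid port_id): an application component first claims an owner
 * identifier, then tags the ports it manages so other components skip them
 * in RTE_ETH_FOREACH_DEV:
 *
 * @code{.c}
 * struct rte_eth_dev_owner owner = { .name = "my_component" };
 *
 * if (rte_eth_dev_owner_new(&owner.id) == 0 &&
 *     rte_eth_dev_owner_set(port_id, &owner) == 0) {
 *	// ... use the port exclusively ...
 *	rte_eth_dev_owner_unset(port_id, owner.id);
 * }
 * @endcode
 */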

int
rte_eth_dev_socket_id(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
	return rte_eth_devices[port_id].data->numa_node;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
	return rte_eth_devices[port_id].security_ctx;
}

uint16_t
rte_eth_dev_count_avail(void)
{
	uint16_t p;
	uint16_t count;

	count = 0;

	RTE_ETH_FOREACH_DEV(p)
		count++;

	return count;
}

uint16_t
rte_eth_dev_count_total(void)
{
	uint16_t port, count = 0;

	RTE_ETH_FOREACH_VALID_DEV(port)
		count++;

	return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
	char *tmp;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
		return -EINVAL;
	}

	/* Do not check 'rte_eth_devices[i].data' here,
	 * because it might be overwritten by a VDEV PMD.
	 */
	tmp = rte_eth_dev_shared_data->data[port_id].name;
	strcpy(name, tmp);
	return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
	uint32_t pid;

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
		return -EINVAL;
	}

	RTE_ETH_FOREACH_VALID_DEV(pid)
		if (!strcmp(name, rte_eth_dev_shared_data->data[pid].name)) {
			*port_id = pid;
			return 0;
		}

	return -ENODEV;
}
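
/*
 * Illustrative lookup sketch (editor's note; the device name format depends
 * on the bus, e.g. a PCI address or a vdev name):
 *
 * @code{.c}
 * uint16_t port_id;
 *
 * if (rte_eth_dev_get_port_by_name("0000:08:00.0", &port_id) == 0)
 *	printf("device is port %u\n", port_id);
 * @endcode
 */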

static int
eth_err(uint16_t port_id, int ret)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return -EIO;
	return ret;
}

static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_rx_queues;
	void **rxq;
	unsigned int i;

	if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
				sizeof(dev->data->rx_queues[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->rx_queues == NULL) {
			dev->data->nb_rx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

		rxq = dev->data->rx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->rx_queue_release)(rxq[i]);
		rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (rxq == NULL)
			return -(ENOMEM);
		if (nb_queues > old_nb_queues) {
			uint16_t new_qs = nb_queues - old_nb_queues;

			memset(rxq + old_nb_queues, 0,
				sizeof(rxq[0]) * new_qs);
		}

		dev->data->rx_queues = rxq;

	} else if (dev->data->rx_queues != NULL && nb_queues == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

		rxq = dev->data->rx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->rx_queue_release)(rxq[i]);

		rte_free(dev->data->rx_queues);
		dev->data->rx_queues = NULL;
	}
	dev->data->nb_rx_queues = nb_queues;
	return 0;
}

static int
eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	uint16_t port_id;

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Invalid Rx queue_id=%u of device with port_id=%u\n",
			       rx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

static int
eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	uint16_t port_id;

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Invalid Tx queue_id=%u of device with port_id=%u\n",
			       tx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}
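
/*
 * The two helpers above are the common queue-id checks this patch extracts;
 * every queue-level API below follows the same pattern (a sketch of a
 * caller, matching the usage in the functions that follow):
 *
 * @code{.c}
 * ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
 * if (ret != 0)
 *	return ret;
 * @endcode
 */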

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_start(dev,
							     rx_queue_id));
}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
}

int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
}
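
/*
 * A minimal usage sketch for the per-queue start/stop APIs above (editor's
 * note; illustrative, assuming the port is already configured and nb_desc,
 * socket_id and mp are declared): they are typically paired with a queue
 * configured as deferred start:
 *
 * @code{.c}
 * struct rte_eth_rxconf rxconf = { .rx_deferred_start = 1 };
 *
 * rte_eth_rx_queue_setup(port_id, queue_id, nb_desc, socket_id, &rxconf, mp);
 * rte_eth_dev_start(port_id);	// does not start the deferred queue
 * rte_eth_dev_rx_queue_start(port_id, queue_id); // start it explicitly
 * @endcode
 */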

static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_tx_queues;
	void **txq;
	unsigned int i;

	if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
						   sizeof(dev->data->tx_queues[0]) * nb_queues,
						   RTE_CACHE_LINE_SIZE);
		if (dev->data->tx_queues == NULL) {
			dev->data->nb_tx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

		txq = dev->data->tx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->tx_queue_release)(txq[i]);
		txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
				  RTE_CACHE_LINE_SIZE);
		if (txq == NULL)
			return -ENOMEM;
		if (nb_queues > old_nb_queues) {
			uint16_t new_qs = nb_queues - old_nb_queues;

			memset(txq + old_nb_queues, 0,
			       sizeof(txq[0]) * new_qs);
		}

		dev->data->tx_queues = txq;

	} else if (dev->data->tx_queues != NULL && nb_queues == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

		txq = dev->data->tx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->tx_queue_release)(txq[i]);

		rte_free(dev->data->tx_queues);
		dev->data->tx_queues = NULL;
	}
	dev->data->nb_tx_queues = nb_queues;
	return 0;
}

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
	switch (speed) {
	case ETH_SPEED_NUM_10M:
		return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
	case ETH_SPEED_NUM_100M:
		return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
	case ETH_SPEED_NUM_1G:
		return ETH_LINK_SPEED_1G;
	case ETH_SPEED_NUM_2_5G:
		return ETH_LINK_SPEED_2_5G;
	case ETH_SPEED_NUM_5G:
		return ETH_LINK_SPEED_5G;
	case ETH_SPEED_NUM_10G:
		return ETH_LINK_SPEED_10G;
	case ETH_SPEED_NUM_20G:
		return ETH_LINK_SPEED_20G;
	case ETH_SPEED_NUM_25G:
		return ETH_LINK_SPEED_25G;
	case ETH_SPEED_NUM_40G:
		return ETH_LINK_SPEED_40G;
	case ETH_SPEED_NUM_50G:
		return ETH_LINK_SPEED_50G;
	case ETH_SPEED_NUM_56G:
		return ETH_LINK_SPEED_56G;
	case ETH_SPEED_NUM_100G:
		return ETH_LINK_SPEED_100G;
	case ETH_SPEED_NUM_200G:
		return ETH_LINK_SPEED_200G;
	default:
		return 0;
	}
}
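
/*
 * Example (editor's note; illustrative): building a link_speeds mask for a
 * fixed 10G full-duplex link from a numeric speed:
 *
 * @code{.c}
 * uint32_t link_speeds = ETH_LINK_SPEED_FIXED |
 *	rte_eth_speed_bitflag(ETH_SPEED_NUM_10G, ETH_LINK_FULL_DUPLEX);
 * @endcode
 */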

const char *
rte_eth_dev_rx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(rte_rx_offload_names); ++i) {
		if (offload == rte_rx_offload_names[i].offload) {
			name = rte_rx_offload_names[i].name;
			break;
		}
	}

	return name;
}

const char *
rte_eth_dev_tx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(rte_tx_offload_names); ++i) {
		if (offload == rte_tx_offload_names[i].offload) {
			name = rte_tx_offload_names[i].name;
			break;
		}
	}

	return name;
}
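
/*
 * Both lookups take a single offload bit, not a mask. A sketch of dumping
 * every name in an Rx capability mask (editor's note; illustrative, 'capa'
 * assumed to come from rte_eth_dev_info_get()):
 *
 * @code{.c}
 * uint64_t capa = dev_info.rx_offload_capa;
 *
 * while (capa != 0) {
 *	uint64_t bit = capa & ~(capa - 1); // isolate lowest set bit
 *	printf("%s\n", rte_eth_dev_rx_offload_name(bit));
 *	capa &= ~bit;
 * }
 * @endcode
 */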

static inline int
check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
		   uint32_t max_rx_pkt_len, uint32_t dev_info_size)
{
	int ret = 0;

	if (dev_info_size == 0) {
		if (config_size != max_rx_pkt_len) {
			RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
				       " %u != %u is not allowed\n",
				       port_id, config_size, max_rx_pkt_len);
			ret = -EINVAL;
		}
	} else if (config_size > dev_info_size) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			       "> max allowed value %u\n", port_id, config_size,
			       dev_info_size);
		ret = -EINVAL;
	} else if (config_size < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			       "< min allowed value %u\n", port_id, config_size,
			       (unsigned int)RTE_ETHER_MIN_LEN);
		ret = -EINVAL;
	}
	return ret;
}

/*
 * Validate offloads that are requested through rte_eth_dev_configure against
 * the offloads successfully set by the Ethernet device.
 *
 * @param port_id
 *   The port identifier of the Ethernet device.
 * @param req_offloads
 *   The offloads that have been requested through `rte_eth_dev_configure`.
 * @param set_offloads
 *   The offloads successfully set by the Ethernet device.
 * @param offload_type
 *   The offload type, i.e. the "Rx"/"Tx" string.
 * @param offload_name
 *   The function that prints the offload name.
 * @return
 *   - (0) if validation is successful.
 *   - (-EINVAL) if a requested offload has been silently disabled.
 */
static int
validate_offloads(uint16_t port_id, uint64_t req_offloads,
		  uint64_t set_offloads, const char *offload_type,
		  const char *(*offload_name)(uint64_t))
{
	uint64_t offloads_diff = req_offloads ^ set_offloads;
	uint64_t offload;
	int ret = 0;

	while (offloads_diff != 0) {
		/* Check if any offload is requested but not enabled. */
		offload = 1ULL << __builtin_ctzll(offloads_diff);
		if (offload & req_offloads) {
			RTE_ETHDEV_LOG(ERR,
				"Port %u failed to enable %s offload %s\n",
				port_id, offload_type, offload_name(offload));
			ret = -EINVAL;
		}

		/* Check if an offload could not be disabled. */
		if (offload & set_offloads) {
			RTE_ETHDEV_LOG(DEBUG,
				"Port %u %s offload %s is not requested but enabled\n",
				port_id, offload_type, offload_name(offload));
		}

		offloads_diff &= ~offload;
	}

	return ret;
}
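
/*
 * Worked example of the XOR walk above (editor's note; illustrative
 * numbers): with req_offloads = 0b101 and set_offloads = 0b011,
 * offloads_diff = 0b110. Bit 0b100 is set in req_offloads only, so the port
 * failed to enable it (-EINVAL); bit 0b010 is set in set_offloads only, so
 * it was enabled without being requested (debug log only).
 */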

int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf orig_conf;
	int diag;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be stopped to allow configuration\n",
			port_id);
		return -EBUSY;
	}

	/* Store the original config, as a rollback is required on failure. */
	memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));

	/*
	 * Copy the dev_conf parameter into the dev structure.
	 * rte_eth_dev_info_get() requires dev_conf, so copy it before the
	 * dev_info get call.
	 */
	if (dev_conf != &dev->data->dev_conf)
		memcpy(&dev->data->dev_conf, dev_conf,
		       sizeof(dev->data->dev_conf));

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		goto rollback;

	/* If the number of queues specified by the application for both Rx
	 * and Tx is zero, use the driver's preferred values. This cannot be
	 * done individually, as it is valid for either Tx or Rx (but not
	 * both) to be zero.
	 * If the driver does not provide any preferred values, fall back on
	 * EAL defaults.
	 */
	if (nb_rx_q == 0 && nb_tx_q == 0) {
		nb_rx_q = dev_info.default_rxportconf.nb_queues;
		if (nb_rx_q == 0)
			nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
		nb_tx_q = dev_info.default_txportconf.nb_queues;
		if (nb_tx_q == 0)
			nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
	}

	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of RX queues requested (%u) is greater than max supported (%d)\n",
			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of TX queues requested (%u) is greater than max supported (%d)\n",
			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Check that the numbers of RX and TX queues are not greater
	 * than the maximum number of RX and TX queues supported by the
	 * configured device.
	 */
	if (nb_rx_q > dev_info.max_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
			port_id, nb_rx_q, dev_info.max_rx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
			port_id, nb_tx_q, dev_info.max_tx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check that the device supports the requested interrupts. */
	if ((dev_conf->intr_conf.lsc == 1) &&
			(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->intr_conf.rmv == 1) &&
			(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * If jumbo frames are enabled, check that the maximum RX packet
	 * length is supported by the configured device.
	 */
	if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) {
			RTE_ETHDEV_LOG(ERR,
				"Ethdev port_id=%u max_rx_pkt_len %u > max valid value %u\n",
				port_id, dev_conf->rxmode.max_rx_pkt_len,
				dev_info.max_rx_pktlen);
			ret = -EINVAL;
			goto rollback;
		} else if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN) {
			RTE_ETHDEV_LOG(ERR,
				"Ethdev port_id=%u max_rx_pkt_len %u < min valid value %u\n",
				port_id, dev_conf->rxmode.max_rx_pkt_len,
				(unsigned int)RTE_ETHER_MIN_LEN);
			ret = -EINVAL;
			goto rollback;
		}
	} else {
		if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN ||
			dev_conf->rxmode.max_rx_pkt_len > RTE_ETHER_MAX_LEN)
			/* Use default value */
			dev->data->dev_conf.rxmode.max_rx_pkt_len =
							RTE_ETHER_MAX_LEN;
	}

	/*
	 * If LRO is enabled, check that the maximum aggregated packet
	 * size is supported by the configured device.
	 */
	if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
		if (dev_conf->rxmode.max_lro_pkt_size == 0)
			dev->data->dev_conf.rxmode.max_lro_pkt_size =
				dev->data->dev_conf.rxmode.max_rx_pkt_len;
		ret = check_lro_pkt_size(port_id,
				dev->data->dev_conf.rxmode.max_lro_pkt_size,
				dev->data->dev_conf.rxmode.max_rx_pkt_len,
				dev_info.max_lro_pkt_size);
		if (ret != 0)
			goto rollback;
	}

	/* Any requested offloading must be within its device capabilities. */
	if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
	     dev_conf->rxmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, dev_conf->rxmode.offloads,
			dev_info.rx_offload_capa,
			__func__);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
	     dev_conf->txmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, dev_conf->txmode.offloads,
			dev_info.tx_offload_capa,
			__func__);
		ret = -EINVAL;
		goto rollback;
	}

	dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
		rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);

	/* Check that the device supports the requested RSS hash functions. */
	if ((dev_info.flow_type_rss_offloads |
	     dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
	    dev_info.flow_type_rss_offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
			port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
			dev_info.flow_type_rss_offloads);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
	if (((dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) == 0) &&
	    (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
			port_id,
			rte_eth_dev_rx_offload_name(DEV_RX_OFFLOAD_RSS_HASH));
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Set up the new number of RX/TX queues and reconfigure the device.
	 */
	diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u rte_eth_dev_rx_queue_config = %d\n",
			port_id, diag);
		ret = diag;
		goto rollback;
	}

	diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u rte_eth_dev_tx_queue_config = %d\n",
			port_id, diag);
		rte_eth_dev_rx_queue_config(dev, 0);
		ret = diag;
		goto rollback;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Initialize Rx profiling if enabled at compilation time. */
	diag = __rte_eth_dev_profile_init(port_id, dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Validate Rx offloads. */
	diag = validate_offloads(port_id,
			dev_conf->rxmode.offloads,
			dev->data->dev_conf.rxmode.offloads, "Rx",
			rte_eth_dev_rx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	/* Validate Tx offloads. */
	diag = validate_offloads(port_id,
			dev_conf->txmode.offloads,
			dev->data->dev_conf.txmode.offloads, "Tx",
			rte_eth_dev_tx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0);
	return 0;
reset_queues:
	rte_eth_dev_rx_queue_config(dev, 0);
	rte_eth_dev_tx_queue_config(dev, 0);
rollback:
	memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));

	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret);
	return ret;
}
1525
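/*
 * Illustrative sketch of the configure-time contract enforced above: query
 * capabilities first and request only supported offloads and RSS types.
 * This is an application-side example, not part of this file; port 0 and
 * "port_conf" are placeholders.
 *
 *        struct rte_eth_dev_info info;
 *        struct rte_eth_conf port_conf = {0};
 *
 *        if (rte_eth_dev_info_get(0, &info) == 0) {
 *                if (info.rx_offload_capa & DEV_RX_OFFLOAD_CHECKSUM)
 *                        port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CHECKSUM;
 *                port_conf.rx_adv_conf.rss_conf.rss_hf =
 *                        ETH_RSS_IP & info.flow_type_rss_offloads;
 *                if (port_conf.rx_adv_conf.rss_conf.rss_hf != 0)
 *                        port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
 *                (void)rte_eth_dev_configure(0, 1, 1, &port_conf);
 *        }
 */
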
1526 void
1527 rte_eth_dev_internal_reset(struct rte_eth_dev *dev)
1528 {
1529         if (dev->data->dev_started) {
1530                 RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
1531                         dev->data->port_id);
1532                 return;
1533         }
1534
1535         rte_eth_dev_rx_queue_config(dev, 0);
1536         rte_eth_dev_tx_queue_config(dev, 0);
1537
1538         memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
1539 }
1540
1541 static void
1542 rte_eth_dev_mac_restore(struct rte_eth_dev *dev,
1543                         struct rte_eth_dev_info *dev_info)
1544 {
1545         struct rte_ether_addr *addr;
1546         uint16_t i;
1547         uint32_t pool = 0;
1548         uint64_t pool_mask;
1549
1550         /* replay MAC address configuration including default MAC */
1551         addr = &dev->data->mac_addrs[0];
1552         if (*dev->dev_ops->mac_addr_set != NULL)
1553                 (*dev->dev_ops->mac_addr_set)(dev, addr);
1554         else if (*dev->dev_ops->mac_addr_add != NULL)
1555                 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);
1556
1557         if (*dev->dev_ops->mac_addr_add != NULL) {
1558                 for (i = 1; i < dev_info->max_mac_addrs; i++) {
1559                         addr = &dev->data->mac_addrs[i];
1560
1561                         /* skip zero address */
1562                         if (rte_is_zero_ether_addr(addr))
1563                                 continue;
1564
1565                         pool = 0;
1566                         pool_mask = dev->data->mac_pool_sel[i];
1567
1568                         do {
1569                                 if (pool_mask & 1ULL)
1570                                         (*dev->dev_ops->mac_addr_add)(dev,
1571                                                 addr, i, pool);
1572                                 pool_mask >>= 1;
1573                                 pool++;
1574                         } while (pool_mask);
1575                 }
1576         }
1577 }
1578
1579 static int
1580 rte_eth_dev_config_restore(struct rte_eth_dev *dev,
1581                            struct rte_eth_dev_info *dev_info, uint16_t port_id)
1582 {
1583         int ret;
1584
1585         if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
1586                 rte_eth_dev_mac_restore(dev, dev_info);
1587
1588         /* replay promiscuous configuration */
1589         /*
1590          * Use the dev_ops callbacks directly: port_id is already valid
1591          * here, and the value must be re-applied even if already set.
1592          */
1593         if (rte_eth_promiscuous_get(port_id) == 1 &&
1594             *dev->dev_ops->promiscuous_enable != NULL) {
1595                 ret = eth_err(port_id,
1596                               (*dev->dev_ops->promiscuous_enable)(dev));
1597                 if (ret != 0 && ret != -ENOTSUP) {
1598                         RTE_ETHDEV_LOG(ERR,
1599                                 "Failed to enable promiscuous mode for device (port %u): %s\n",
1600                                 port_id, rte_strerror(-ret));
1601                         return ret;
1602                 }
1603         } else if (rte_eth_promiscuous_get(port_id) == 0 &&
1604                    *dev->dev_ops->promiscuous_disable != NULL) {
1605                 ret = eth_err(port_id,
1606                               (*dev->dev_ops->promiscuous_disable)(dev));
1607                 if (ret != 0 && ret != -ENOTSUP) {
1608                         RTE_ETHDEV_LOG(ERR,
1609                                 "Failed to disable promiscuous mode for device (port %u): %s\n",
1610                                 port_id, rte_strerror(-ret));
1611                         return ret;
1612                 }
1613         }
1614
1615         /* replay all multicast configuration */
1616         /*
1617          * Use the dev_ops callbacks directly: port_id is already valid
1618          * here, and the value must be re-applied even if already set.
1619          */
1620         if (rte_eth_allmulticast_get(port_id) == 1 &&
1621             *dev->dev_ops->allmulticast_enable != NULL) {
1622                 ret = eth_err(port_id,
1623                               (*dev->dev_ops->allmulticast_enable)(dev));
1624                 if (ret != 0 && ret != -ENOTSUP) {
1625                         RTE_ETHDEV_LOG(ERR,
1626                                 "Failed to enable allmulticast mode for device (port %u): %s\n",
1627                                 port_id, rte_strerror(-ret));
1628                         return ret;
1629                 }
1630         } else if (rte_eth_allmulticast_get(port_id) == 0 &&
1631                    *dev->dev_ops->allmulticast_disable != NULL) {
1632                 ret = eth_err(port_id,
1633                               (*dev->dev_ops->allmulticast_disable)(dev));
1634                 if (ret != 0 && ret != -ENOTSUP) {
1635                         RTE_ETHDEV_LOG(ERR,
1636                                 "Failed to disable allmulticast mode for device (port %u): %s\n",
1637                                 port_id, rte_strerror(-ret));
1638                         return ret;
1639                 }
1640         }
1641
1642         return 0;
1643 }
1644
1645 int
1646 rte_eth_dev_start(uint16_t port_id)
1647 {
1648         struct rte_eth_dev *dev;
1649         struct rte_eth_dev_info dev_info;
1650         int diag;
1651         int ret;
1652
1653         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1654
1655         dev = &rte_eth_devices[port_id];
1656
1657         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1658
1659         if (dev->data->dev_started != 0) {
1660                 RTE_ETHDEV_LOG(INFO,
1661                         "Device with port_id=%"PRIu16" already started\n",
1662                         port_id);
1663                 return 0;
1664         }
1665
1666         ret = rte_eth_dev_info_get(port_id, &dev_info);
1667         if (ret != 0)
1668                 return ret;
1669
1670         /* Restore the MAC address now if the device does not support live change */
1671         if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
1672                 rte_eth_dev_mac_restore(dev, &dev_info);
1673
1674         diag = (*dev->dev_ops->dev_start)(dev);
1675         if (diag == 0)
1676                 dev->data->dev_started = 1;
1677         else
1678                 return eth_err(port_id, diag);
1679
1680         ret = rte_eth_dev_config_restore(dev, &dev_info, port_id);
1681         if (ret != 0) {
1682                 RTE_ETHDEV_LOG(ERR,
1683                         "Error during restoring configuration for device (port %u): %s\n",
1684                         port_id, rte_strerror(-ret));
1685                 rte_eth_dev_stop(port_id);
1686                 return ret;
1687         }
1688
1689         if (dev->data->dev_conf.intr_conf.lsc == 0) {
1690                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
1691                 (*dev->dev_ops->link_update)(dev, 0);
1692         }
1693
1694         rte_ethdev_trace_start(port_id);
1695         return 0;
1696 }
1697
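/*
 * Illustrative start/stop sequence; an application-side sketch with port 0
 * as a placeholder. Since rte_eth_dev_start() above replays the MAC,
 * promiscuous and allmulticast settings, they may be configured before the
 * port is started.
 *
 *        rte_eth_promiscuous_enable(0);
 *        if (rte_eth_dev_start(0) == 0) {
 *                ... burst Rx/Tx ...
 *                rte_eth_dev_stop(0);
 *        }
 */
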
1698 void
1699 rte_eth_dev_stop(uint16_t port_id)
1700 {
1701         struct rte_eth_dev *dev;
1702
1703         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1704         dev = &rte_eth_devices[port_id];
1705
1706         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1707
1708         if (dev->data->dev_started == 0) {
1709                 RTE_ETHDEV_LOG(INFO,
1710                         "Device with port_id=%"PRIu16" already stopped\n",
1711                         port_id);
1712                 return;
1713         }
1714
1715         dev->data->dev_started = 0;
1716         (*dev->dev_ops->dev_stop)(dev);
1717         rte_ethdev_trace_stop(port_id);
1718 }
1719
1720 int
1721 rte_eth_dev_set_link_up(uint16_t port_id)
1722 {
1723         struct rte_eth_dev *dev;
1724
1725         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1726
1727         dev = &rte_eth_devices[port_id];
1728
1729         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1730         return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
1731 }
1732
1733 int
1734 rte_eth_dev_set_link_down(uint16_t port_id)
1735 {
1736         struct rte_eth_dev *dev;
1737
1738         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1739
1740         dev = &rte_eth_devices[port_id];
1741
1742         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1743         return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
1744 }
1745
1746 void
1747 rte_eth_dev_close(uint16_t port_id)
1748 {
1749         struct rte_eth_dev *dev;
1750
1751         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1752         dev = &rte_eth_devices[port_id];
1753
1754         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
1755         dev->data->dev_started = 0;
1756         (*dev->dev_ops->dev_close)(dev);
1757
1758         rte_ethdev_trace_close(port_id);
1759         rte_eth_dev_release_port(dev);
1760 }
1761
1762 int
1763 rte_eth_dev_reset(uint16_t port_id)
1764 {
1765         struct rte_eth_dev *dev;
1766         int ret;
1767
1768         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1769         dev = &rte_eth_devices[port_id];
1770
1771         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
1772
1773         rte_eth_dev_stop(port_id);
1774         ret = dev->dev_ops->dev_reset(dev);
1775
1776         return eth_err(port_id, ret);
1777 }
1778
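/*
 * Sketch of the intended recovery flow for rte_eth_dev_reset(): the
 * application typically reacts to an RTE_ETH_EVENT_INTR_RESET event,
 * resets the port, then rebuilds its configuration. Application-side
 * example only; the names below are placeholders.
 *
 *        if (rte_eth_dev_reset(port_id) == 0) {
 *                rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
 *                ... re-create Rx/Tx queues, then rte_eth_dev_start() ...
 *        }
 */
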
1779 int
1780 rte_eth_dev_is_removed(uint16_t port_id)
1781 {
1782         struct rte_eth_dev *dev;
1783         int ret;
1784
1785         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
1786
1787         dev = &rte_eth_devices[port_id];
1788
1789         if (dev->state == RTE_ETH_DEV_REMOVED)
1790                 return 1;
1791
1792         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);
1793
1794         ret = dev->dev_ops->is_removed(dev);
1795         if (ret != 0)
1796                 /* Device is physically removed. */
1797                 dev->state = RTE_ETH_DEV_REMOVED;
1798
1799         return ret;
1800 }
1801
1802 int
1803 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1804                        uint16_t nb_rx_desc, unsigned int socket_id,
1805                        const struct rte_eth_rxconf *rx_conf,
1806                        struct rte_mempool *mp)
1807 {
1808         int ret;
1809         uint32_t mbp_buf_size;
1810         struct rte_eth_dev *dev;
1811         struct rte_eth_dev_info dev_info;
1812         struct rte_eth_rxconf local_conf;
1813         void **rxq;
1814
1815         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1816
1817         dev = &rte_eth_devices[port_id];
1818         if (rx_queue_id >= dev->data->nb_rx_queues) {
1819                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
1820                 return -EINVAL;
1821         }
1822
1823         if (mp == NULL) {
1824                 RTE_ETHDEV_LOG(ERR, "Invalid null mempool pointer\n");
1825                 return -EINVAL;
1826         }
1827
1828         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
1829
1830         /*
1831          * Check the size of the mbuf data buffer.
1832          * This value must be provided in the private data of the memory pool.
1833          * First check that the memory pool has a valid private data.
1834          */
1835         ret = rte_eth_dev_info_get(port_id, &dev_info);
1836         if (ret != 0)
1837                 return ret;
1838
1839         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
1840                 RTE_ETHDEV_LOG(ERR, "%s private_data_size %d < %d\n",
1841                         mp->name, (int)mp->private_data_size,
1842                         (int)sizeof(struct rte_pktmbuf_pool_private));
1843                 return -ENOSPC;
1844         }
1845         mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1846
1847         if (mbp_buf_size < dev_info.min_rx_bufsize + RTE_PKTMBUF_HEADROOM) {
1848                 RTE_ETHDEV_LOG(ERR,
1849                         "%s mbuf_data_room_size %d < %d (RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)=%d)\n",
1850                         mp->name, (int)mbp_buf_size,
1851                         (int)(RTE_PKTMBUF_HEADROOM + dev_info.min_rx_bufsize),
1852                         (int)RTE_PKTMBUF_HEADROOM,
1853                         (int)dev_info.min_rx_bufsize);
1854                 return -EINVAL;
1855         }
1856
1857         /* Use the default specified by the driver if nb_rx_desc is zero */
1858         if (nb_rx_desc == 0) {
1859                 nb_rx_desc = dev_info.default_rxportconf.ring_size;
1860                 /* If driver default is also zero, fall back on EAL default */
1861                 if (nb_rx_desc == 0)
1862                         nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
1863         }
1864
1865         if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
1866                         nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
1867                         nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
1868
1869                 RTE_ETHDEV_LOG(ERR,
1870                         "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
1871                         nb_rx_desc, dev_info.rx_desc_lim.nb_max,
1872                         dev_info.rx_desc_lim.nb_min,
1873                         dev_info.rx_desc_lim.nb_align);
1874                 return -EINVAL;
1875         }
1876
1877         if (dev->data->dev_started &&
1878                 !(dev_info.dev_capa &
1879                         RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
1880                 return -EBUSY;
1881
1882         if (dev->data->dev_started &&
1883                 (dev->data->rx_queue_state[rx_queue_id] !=
1884                         RTE_ETH_QUEUE_STATE_STOPPED))
1885                 return -EBUSY;
1886
1887         rxq = dev->data->rx_queues;
1888         if (rxq[rx_queue_id]) {
1889                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
1890                                         -ENOTSUP);
1891                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
1892                 rxq[rx_queue_id] = NULL;
1893         }
1894
1895         if (rx_conf == NULL)
1896                 rx_conf = &dev_info.default_rxconf;
1897
1898         local_conf = *rx_conf;
1899
1900         /*
1901          * If an offload has already been enabled in
1902          * rte_eth_dev_configure(), it is enabled on all queues,
1903          * so there is no need to enable it on this queue again.
1904          * The local_conf.offloads passed to the underlying PMD carries
1905          * only those offloads that are enabled on this queue and
1906          * not on all queues.
1907          */
1908         local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
1909
1910         /*
1911          * Offloads newly added for this queue are those not enabled in
1912          * rte_eth_dev_configure(), and they must be of the per-queue type.
1913          * A pure per-port offload can't be enabled on one queue while
1914          * disabled on another, and it can't be newly added on any queue
1915          * unless it has already been enabled in
1916          * rte_eth_dev_configure().
1917          */
1918         if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
1919              local_conf.offloads) {
1920                 RTE_ETHDEV_LOG(ERR,
1921                         "Ethdev port_id=%d rx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
1922                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
1923                         port_id, rx_queue_id, local_conf.offloads,
1924                         dev_info.rx_queue_offload_capa,
1925                         __func__);
1926                 return -EINVAL;
1927         }
1928
1929         /*
1930          * If LRO is enabled, check that the maximum aggregated packet
1931          * size is supported by the configured device.
1932          */
1933         if (local_conf.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
1934                 if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0)
1935                         dev->data->dev_conf.rxmode.max_lro_pkt_size =
1936                                 dev->data->dev_conf.rxmode.max_rx_pkt_len;
1937                 int ret = check_lro_pkt_size(port_id,
1938                                 dev->data->dev_conf.rxmode.max_lro_pkt_size,
1939                                 dev->data->dev_conf.rxmode.max_rx_pkt_len,
1940                                 dev_info.max_lro_pkt_size);
1941                 if (ret != 0)
1942                         return ret;
1943         }
1944
1945         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
1946                                               socket_id, &local_conf, mp);
1947         if (!ret) {
1948                 if (!dev->data->min_rx_buf_size ||
1949                     dev->data->min_rx_buf_size > mbp_buf_size)
1950                         dev->data->min_rx_buf_size = mbp_buf_size;
1951         }
1952
1953         rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp,
1954                 rx_conf, ret);
1955         return eth_err(port_id, ret);
1956 }
1957
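/*
 * Sketch of the mempool sizing rule checked above: the mbuf data room must
 * cover RTE_PKTMBUF_HEADROOM plus the device's minimum Rx buffer size.
 * A NULL rx_conf selects dev_info.default_rxconf, and nb_rx_desc == 0
 * selects the driver default (or the EAL fallback) ring size.
 * Application-side example; "mbuf_pool" and the pool sizes are placeholders.
 *
 *        struct rte_mempool *mbuf_pool = rte_pktmbuf_pool_create("mbuf_pool",
 *                8192, 256, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *
 *        if (mbuf_pool != NULL)
 *                rte_eth_rx_queue_setup(0, 0, 0, rte_socket_id(), NULL,
 *                                mbuf_pool);
 */
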
1958 int
1959 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1960                                uint16_t nb_rx_desc,
1961                                const struct rte_eth_hairpin_conf *conf)
1962 {
1963         int ret;
1964         struct rte_eth_dev *dev;
1965         struct rte_eth_hairpin_cap cap;
1966         void **rxq;
1967         int i;
1968         int count;
1969
1970         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1971
1972         dev = &rte_eth_devices[port_id];
1973         if (rx_queue_id >= dev->data->nb_rx_queues) {
1974                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
1975                 return -EINVAL;
1976         }
1977         ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
1978         if (ret != 0)
1979                 return ret;
1980         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_hairpin_queue_setup,
1981                                 -ENOTSUP);
1982         /* If nb_rx_desc is zero, use the maximum number of descriptors from the driver. */
1983         if (nb_rx_desc == 0)
1984                 nb_rx_desc = cap.max_nb_desc;
1985         if (nb_rx_desc > cap.max_nb_desc) {
1986                 RTE_ETHDEV_LOG(ERR,
1987                         "Invalid value for nb_rx_desc(=%hu), should be: <= %hu\n",
1988                         nb_rx_desc, cap.max_nb_desc);
1989                 return -EINVAL;
1990         }
1991         if (conf->peer_count > cap.max_rx_2_tx) {
1992                 RTE_ETHDEV_LOG(ERR,
1993                         "Invalid value for number of peers for Rx queue(=%hu), should be: <= %hu\n",
1994                         conf->peer_count, cap.max_rx_2_tx);
1995                 return -EINVAL;
1996         }
1997         if (conf->peer_count == 0) {
1998                 RTE_ETHDEV_LOG(ERR,
1999                         "Invalid value for number of peers for Rx queue(=%hu), should be: > 0\n",
2000                         conf->peer_count);
2001                 return -EINVAL;
2002         }
2003         for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
2004              cap.max_nb_queues != UINT16_MAX; i++) {
2005                 if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
2006                         count++;
2007         }
2008         if (count > cap.max_nb_queues) {
2009                 RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues, max is %d\n",
2010                                cap.max_nb_queues);
2011                 return -EINVAL;
2012         }
2013         if (dev->data->dev_started)
2014                 return -EBUSY;
2015         rxq = dev->data->rx_queues;
2016         if (rxq[rx_queue_id] != NULL) {
2017                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
2018                                         -ENOTSUP);
2019                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
2020                 rxq[rx_queue_id] = NULL;
2021         }
2022         ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
2023                                                       nb_rx_desc, conf);
2024         if (ret == 0)
2025                 dev->data->rx_queue_state[rx_queue_id] =
2026                         RTE_ETH_QUEUE_STATE_HAIRPIN;
2027         return eth_err(port_id, ret);
2028 }
2029
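/*
 * Sketch of a single-port hairpin binding: Rx queue 1 is peered with Tx
 * queue 1 on the same port so traffic loops in hardware. Application-side
 * example; the port and queue ids are placeholders, and the Tx side needs
 * a matching rte_eth_tx_hairpin_queue_setup() call.
 *
 *        struct rte_eth_hairpin_conf hairpin_conf = {
 *                .peer_count = 1,
 *                .peers[0] = { .port = 0, .queue = 1 },
 *        };
 *
 *        rte_eth_rx_hairpin_queue_setup(0, 1, 0, &hairpin_conf);
 */
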
2030 int
2031 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2032                        uint16_t nb_tx_desc, unsigned int socket_id,
2033                        const struct rte_eth_txconf *tx_conf)
2034 {
2035         struct rte_eth_dev *dev;
2036         struct rte_eth_dev_info dev_info;
2037         struct rte_eth_txconf local_conf;
2038         void **txq;
2039         int ret;
2040
2041         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2042
2043         dev = &rte_eth_devices[port_id];
2044         if (tx_queue_id >= dev->data->nb_tx_queues) {
2045                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
2046                 return -EINVAL;
2047         }
2048
2049         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
2050
2051         ret = rte_eth_dev_info_get(port_id, &dev_info);
2052         if (ret != 0)
2053                 return ret;
2054
2055         /* Use the default specified by the driver if nb_tx_desc is zero */
2056         if (nb_tx_desc == 0) {
2057                 nb_tx_desc = dev_info.default_txportconf.ring_size;
2058                 /* If driver default is zero, fall back on EAL default */
2059                 if (nb_tx_desc == 0)
2060                         nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
2061         }
2062         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
2063             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
2064             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
2065                 RTE_ETHDEV_LOG(ERR,
2066                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
2067                         nb_tx_desc, dev_info.tx_desc_lim.nb_max,
2068                         dev_info.tx_desc_lim.nb_min,
2069                         dev_info.tx_desc_lim.nb_align);
2070                 return -EINVAL;
2071         }
2072
2073         if (dev->data->dev_started &&
2074                 !(dev_info.dev_capa &
2075                         RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
2076                 return -EBUSY;
2077
2078         if (dev->data->dev_started &&
2079                 (dev->data->tx_queue_state[tx_queue_id] !=
2080                         RTE_ETH_QUEUE_STATE_STOPPED))
2081                 return -EBUSY;
2082
2083         txq = dev->data->tx_queues;
2084         if (txq[tx_queue_id]) {
2085                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
2086                                         -ENOTSUP);
2087                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
2088                 txq[tx_queue_id] = NULL;
2089         }
2090
2091         if (tx_conf == NULL)
2092                 tx_conf = &dev_info.default_txconf;
2093
2094         local_conf = *tx_conf;
2095
2096         /*
2097          * If an offload has already been enabled in
2098          * rte_eth_dev_configure(), it is enabled on all queues,
2099          * so there is no need to enable it on this queue again.
2100          * The local_conf.offloads passed to the underlying PMD carries
2101          * only those offloads that are enabled on this queue and
2102          * not on all queues.
2103          */
2104         local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
2105
2106         /*
2107          * Offloads newly added for this queue are those not enabled in
2108          * rte_eth_dev_configure(), and they must be of the per-queue type.
2109          * A pure per-port offload can't be enabled on one queue while
2110          * disabled on another, and it can't be newly added on any queue
2111          * unless it has already been enabled in
2112          * rte_eth_dev_configure().
2113          */
2114         if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
2115              local_conf.offloads) {
2116                 RTE_ETHDEV_LOG(ERR,
2117                         "Ethdev port_id=%d tx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
2118                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2119                         port_id, tx_queue_id, local_conf.offloads,
2120                         dev_info.tx_queue_offload_capa,
2121                         __func__);
2122                 return -EINVAL;
2123         }
2124
2125         rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf);
2126         return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
2127                        tx_queue_id, nb_tx_desc, socket_id, &local_conf));
2128 }
2129
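/*
 * Sketch of the per-queue offload rule enforced above: an offload not set
 * port-wide in rte_eth_dev_configure() may be added here only if it is in
 * tx_queue_offload_capa. Application-side example; port/queue 0 are
 * placeholders.
 *
 *        struct rte_eth_dev_info info;
 *        struct rte_eth_txconf txconf;
 *
 *        if (rte_eth_dev_info_get(0, &info) == 0) {
 *                txconf = info.default_txconf;
 *                if (info.tx_queue_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
 *                        txconf.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
 *                rte_eth_tx_queue_setup(0, 0, 0, rte_socket_id(), &txconf);
 *        }
 */
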
2130 int
2131 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2132                                uint16_t nb_tx_desc,
2133                                const struct rte_eth_hairpin_conf *conf)
2134 {
2135         struct rte_eth_dev *dev;
2136         struct rte_eth_hairpin_cap cap;
2137         void **txq;
2138         int i;
2139         int count;
2140         int ret;
2141
2142         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2143         dev = &rte_eth_devices[port_id];
2144         if (tx_queue_id >= dev->data->nb_tx_queues) {
2145                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
2146                 return -EINVAL;
2147         }
2148         ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2149         if (ret != 0)
2150                 return ret;
2151         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup,
2152                                 -ENOTSUP);
2153         /* If nb_tx_desc is zero, use the maximum number of descriptors from the driver. */
2154         if (nb_tx_desc == 0)
2155                 nb_tx_desc = cap.max_nb_desc;
2156         if (nb_tx_desc > cap.max_nb_desc) {
2157                 RTE_ETHDEV_LOG(ERR,
2158                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu\n",
2159                         nb_tx_desc, cap.max_nb_desc);
2160                 return -EINVAL;
2161         }
2162         if (conf->peer_count > cap.max_tx_2_rx) {
2163                 RTE_ETHDEV_LOG(ERR,
2164                         "Invalid value for number of peers for Tx queue(=%hu), should be: <= %hu\n",
2165                         conf->peer_count, cap.max_tx_2_rx);
2166                 return -EINVAL;
2167         }
2168         if (conf->peer_count == 0) {
2169                 RTE_ETHDEV_LOG(ERR,
2170                         "Invalid value for number of peers for Tx queue(=%hu), should be: > 0\n",
2171                         conf->peer_count);
2172                 return -EINVAL;
2173         }
2174         for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
2175              cap.max_nb_queues != UINT16_MAX; i++) {
2176                 if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
2177                         count++;
2178         }
2179         if (count > cap.max_nb_queues) {
2180                 RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues, max is %d\n",
2181                                cap.max_nb_queues);
2182                 return -EINVAL;
2183         }
2184         if (dev->data->dev_started)
2185                 return -EBUSY;
2186         txq = dev->data->tx_queues;
2187         if (txq[tx_queue_id] != NULL) {
2188                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
2189                                         -ENOTSUP);
2190                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
2191                 txq[tx_queue_id] = NULL;
2192         }
2193         ret = (*dev->dev_ops->tx_hairpin_queue_setup)
2194                 (dev, tx_queue_id, nb_tx_desc, conf);
2195         if (ret == 0)
2196                 dev->data->tx_queue_state[tx_queue_id] =
2197                         RTE_ETH_QUEUE_STATE_HAIRPIN;
2198         return eth_err(port_id, ret);
2199 }
2200
2201 void
2202 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
2203                 void *userdata __rte_unused)
2204 {
2205         rte_pktmbuf_free_bulk(pkts, unsent);
2206 }
2207
2208 void
2209 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
2210                 void *userdata)
2211 {
2212         uint64_t *count = userdata;
2213
2214         rte_pktmbuf_free_bulk(pkts, unsent);
2215         *count += unsent;
2216 }
2217
2218 int
2219 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
2220                 buffer_tx_error_fn cbfn, void *userdata)
2221 {
2222         buffer->error_callback = cbfn;
2223         buffer->error_userdata = userdata;
2224         return 0;
2225 }
2226
2227 int
2228 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
2229 {
2230         int ret = 0;
2231
2232         if (buffer == NULL)
2233                 return -EINVAL;
2234
2235         buffer->size = size;
2236         if (buffer->error_callback == NULL) {
2237                 ret = rte_eth_tx_buffer_set_err_callback(
2238                         buffer, rte_eth_tx_buffer_drop_callback, NULL);
2239         }
2240
2241         return ret;
2242 }
2243
2244 int
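/*
 * Sketch of the Tx buffering helpers above: the buffer is sized with
 * RTE_ETH_TX_BUFFER_SIZE() and drops unsent packets by default; setting
 * the count callback tallies them into a user counter instead.
 * Application-side example; "tx_buf" and the size 32 are placeholders.
 *
 *        static uint64_t dropped;
 *        struct rte_eth_dev_tx_buffer *buf = rte_zmalloc_socket("tx_buf",
 *                RTE_ETH_TX_BUFFER_SIZE(32), 0, rte_socket_id());
 *
 *        if (buf != NULL) {
 *                rte_eth_tx_buffer_init(buf, 32);
 *                rte_eth_tx_buffer_set_err_callback(buf,
 *                        rte_eth_tx_buffer_count_callback, &dropped);
 *        }
 */
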
2245 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
2246 {
        struct rte_eth_dev *dev;
        int ret;

        /* Validate input data. Bail out if not valid or not supported. */
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);

        if (queue_id >= dev->data->nb_tx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
                return -EINVAL;
        }
2253
2254         /* Call driver to free pending mbufs. */
2255         ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
2256                                                free_cnt);
2257         return eth_err(port_id, ret);
2258 }
2259
2260 int
2261 rte_eth_promiscuous_enable(uint16_t port_id)
2262 {
2263         struct rte_eth_dev *dev;
2264         int diag = 0;
2265
2266         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2267         dev = &rte_eth_devices[port_id];
2268
2269         if (dev->data->promiscuous == 1)
2270                 return 0;
2271
2272         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP);
2273
2274         diag = (*dev->dev_ops->promiscuous_enable)(dev);
2275         dev->data->promiscuous = (diag == 0) ? 1 : 0;
2276
2277         return eth_err(port_id, diag);
2278 }
2279
2280 int
2281 rte_eth_promiscuous_disable(uint16_t port_id)
2282 {
2283         struct rte_eth_dev *dev;
2284         int diag = 0;
2285
2286         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2287         dev = &rte_eth_devices[port_id];
2288
2289         if (dev->data->promiscuous == 0)
2290                 return 0;
2291
2292         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP);
2293
2294         dev->data->promiscuous = 0;
2295         diag = (*dev->dev_ops->promiscuous_disable)(dev);
2296         if (diag != 0)
2297                 dev->data->promiscuous = 1;
2298
2299         return eth_err(port_id, diag);
2300 }
2301
2302 int
2303 rte_eth_promiscuous_get(uint16_t port_id)
2304 {
2305         struct rte_eth_dev *dev;
2306
2307         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2308
2309         dev = &rte_eth_devices[port_id];
2310         return dev->data->promiscuous;
2311 }
2312
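/*
 * The enable/disable pairs above return early when the requested state is
 * already set, so callers can apply the desired state unconditionally.
 * Application-side sketch; port 0 is a placeholder.
 *
 *        if (rte_eth_promiscuous_get(0) == 0)
 *                rte_eth_promiscuous_enable(0);
 */
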
2313 int
2314 rte_eth_allmulticast_enable(uint16_t port_id)
2315 {
2316         struct rte_eth_dev *dev;
2317         int diag;
2318
2319         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2320         dev = &rte_eth_devices[port_id];
2321
2322         if (dev->data->all_multicast == 1)
2323                 return 0;
2324
2325         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP);
2326         diag = (*dev->dev_ops->allmulticast_enable)(dev);
2327         dev->data->all_multicast = (diag == 0) ? 1 : 0;
2328
2329         return eth_err(port_id, diag);
2330 }
2331
2332 int
2333 rte_eth_allmulticast_disable(uint16_t port_id)
2334 {
2335         struct rte_eth_dev *dev;
2336         int diag;
2337
2338         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2339         dev = &rte_eth_devices[port_id];
2340
2341         if (dev->data->all_multicast == 0)
2342                 return 0;
2343
2344         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP);
2345         dev->data->all_multicast = 0;
2346         diag = (*dev->dev_ops->allmulticast_disable)(dev);
2347         if (diag != 0)
2348                 dev->data->all_multicast = 1;
2349
2350         return eth_err(port_id, diag);
2351 }
2352
2353 int
2354 rte_eth_allmulticast_get(uint16_t port_id)
2355 {
2356         struct rte_eth_dev *dev;
2357
2358         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2359
2360         dev = &rte_eth_devices[port_id];
2361         return dev->data->all_multicast;
2362 }
2363
2364 int
2365 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
2366 {
2367         struct rte_eth_dev *dev;
2368
2369         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2370         dev = &rte_eth_devices[port_id];
2371
2372         if (dev->data->dev_conf.intr_conf.lsc &&
2373             dev->data->dev_started)
2374                 rte_eth_linkstatus_get(dev, eth_link);
2375         else {
2376                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2377                 (*dev->dev_ops->link_update)(dev, 1);
2378                 *eth_link = dev->data->dev_link;
2379         }
2380
2381         return 0;
2382 }
2383
2384 int
2385 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
2386 {
2387         struct rte_eth_dev *dev;
2388
2389         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2390         dev = &rte_eth_devices[port_id];
2391
2392         if (dev->data->dev_conf.intr_conf.lsc &&
2393             dev->data->dev_started)
2394                 rte_eth_linkstatus_get(dev, eth_link);
2395         else {
2396                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2397                 (*dev->dev_ops->link_update)(dev, 0);
2398                 *eth_link = dev->data->dev_link;
2399         }
2400
2401         return 0;
2402 }
2403
2404 const char *
2405 rte_eth_link_speed_to_str(uint32_t link_speed)
2406 {
2407         switch (link_speed) {
2408         case ETH_SPEED_NUM_NONE: return "None";
2409         case ETH_SPEED_NUM_10M:  return "10 Mbps";
2410         case ETH_SPEED_NUM_100M: return "100 Mbps";
2411         case ETH_SPEED_NUM_1G:   return "1 Gbps";
2412         case ETH_SPEED_NUM_2_5G: return "2.5 Gbps";
2413         case ETH_SPEED_NUM_5G:   return "5 Gbps";
2414         case ETH_SPEED_NUM_10G:  return "10 Gbps";
2415         case ETH_SPEED_NUM_20G:  return "20 Gbps";
2416         case ETH_SPEED_NUM_25G:  return "25 Gbps";
2417         case ETH_SPEED_NUM_40G:  return "40 Gbps";
2418         case ETH_SPEED_NUM_50G:  return "50 Gbps";
2419         case ETH_SPEED_NUM_56G:  return "56 Gbps";
2420         case ETH_SPEED_NUM_100G: return "100 Gbps";
2421         case ETH_SPEED_NUM_200G: return "200 Gbps";
2422         case ETH_SPEED_NUM_UNKNOWN: return "Unknown";
2423         default: return "Invalid";
2424         }
2425 }
2426
2427 int
2428 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
2429 {
2430         if (eth_link->link_status == ETH_LINK_DOWN)
2431                 return snprintf(str, len, "Link down");
2432         else
2433                 return snprintf(str, len, "Link up at %s %s %s",
2434                         rte_eth_link_speed_to_str(eth_link->link_speed),
2435                         (eth_link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
2436                         "FDX" : "HDX",
2437                         (eth_link->link_autoneg == ETH_LINK_AUTONEG) ?
2438                         "Autoneg" : "Fixed");
2439 }
2440
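/*
 * Sketch combining the link query and the formatting helpers above.
 * Application-side example; port 0 is a placeholder.
 *
 *        struct rte_eth_link link;
 *        char buf[RTE_ETH_LINK_MAX_STR_LEN];
 *
 *        if (rte_eth_link_get_nowait(0, &link) == 0) {
 *                rte_eth_link_to_str(buf, sizeof(buf), &link);
 *                printf("port 0: %s\n", buf);
 *        }
 */
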
2441 int
2442 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
2443 {
2444         struct rte_eth_dev *dev;
2445
2446         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2447
2448         dev = &rte_eth_devices[port_id];
2449         memset(stats, 0, sizeof(*stats));
2450
2451         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
2452         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
2453         return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
2454 }
2455
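/*
 * Sketch of basic statistics usage. Application-side example; port 0 is a
 * placeholder.
 *
 *        struct rte_eth_stats st;
 *
 *        if (rte_eth_stats_get(0, &st) == 0)
 *                printf("rx=%"PRIu64" tx=%"PRIu64" missed=%"PRIu64"\n",
 *                        st.ipackets, st.opackets, st.imissed);
 */
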
2456 int
2457 rte_eth_stats_reset(uint16_t port_id)
2458 {
2459         struct rte_eth_dev *dev;
2460         int ret;
2461
2462         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2463         dev = &rte_eth_devices[port_id];
2464
2465         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
2466         ret = (*dev->dev_ops->stats_reset)(dev);
2467         if (ret != 0)
2468                 return eth_err(port_id, ret);
2469
2470         dev->data->rx_mbuf_alloc_failed = 0;
2471
2472         return 0;
2473 }
2474
2475 static inline int
2476 get_xstats_basic_count(struct rte_eth_dev *dev)
2477 {
2478         uint16_t nb_rxqs, nb_txqs;
2479         int count;
2480
2481         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2482         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2483
2484         count = RTE_NB_STATS;
2485         count += nb_rxqs * RTE_NB_RXQ_STATS;
2486         count += nb_txqs * RTE_NB_TXQ_STATS;
2487
2488         return count;
2489 }
2490
2491 static int
2492 get_xstats_count(uint16_t port_id)
2493 {
2494         struct rte_eth_dev *dev;
2495         int count;
2496
2497         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2498         dev = &rte_eth_devices[port_id];
2499         if (dev->dev_ops->xstats_get_names_by_id != NULL) {
2500                 count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
2501                                 NULL, 0);
2502                 if (count < 0)
2503                         return eth_err(port_id, count);
2504         }
2505         if (dev->dev_ops->xstats_get_names != NULL) {
2506                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
2507                 if (count < 0)
2508                         return eth_err(port_id, count);
2509         } else
2510                 count = 0;
2511
2513         count += get_xstats_basic_count(dev);
2514
2515         return count;
2516 }
2517
2518 int
2519 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
2520                 uint64_t *id)
2521 {
2522         int cnt_xstats, idx_xstat;
2523
2524         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2525
2526         if (!id) {
2527                 RTE_ETHDEV_LOG(ERR, "Id pointer is NULL\n");
2528                 return -EINVAL;
2529         }
2530
2531         if (!xstat_name) {
2532                 RTE_ETHDEV_LOG(ERR, "xstat_name pointer is NULL\n");
2533                 return -EINVAL;
2534         }
2535
2536         /* Get count */
2537         cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
2538         if (cnt_xstats < 0) {
2539                 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n");
2540                 return -ENODEV;
2541         }
2542
2543         /* Get id-name lookup table */
2544         struct rte_eth_xstat_name xstats_names[cnt_xstats];
2545
2546         if (cnt_xstats != rte_eth_xstats_get_names_by_id(
2547                         port_id, xstats_names, cnt_xstats, NULL)) {
2548                 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n");
2549                 return -1;
2550         }
2551
2552         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
2553                 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
2554                         *id = idx_xstat;
2555                         return 0;
2556                 }
2557         }
2558
2559         return -EINVAL;
2560 }
2561
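/*
 * Sketch pairing the id-by-name lookup above with
 * rte_eth_xstats_get_by_id(); "rx_good_packets" is one of the generic
 * statistics exposed by the ethdev layer. Application-side example; port 0
 * is a placeholder.
 *
 *        uint64_t id, value;
 *
 *        if (rte_eth_xstats_get_id_by_name(0, "rx_good_packets", &id) == 0 &&
 *            rte_eth_xstats_get_by_id(0, &id, &value, 1) == 1)
 *                printf("rx_good_packets=%"PRIu64"\n", value);
 */
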
2562 /* retrieve basic stats names */
2563 static int
2564 rte_eth_basic_stats_get_names(struct rte_eth_dev *dev,
2565         struct rte_eth_xstat_name *xstats_names)
2566 {
2567         int cnt_used_entries = 0;
2568         uint32_t idx, id_queue;
2569         uint16_t num_q;
2570
2571         for (idx = 0; idx < RTE_NB_STATS; idx++) {
2572                 strlcpy(xstats_names[cnt_used_entries].name,
2573                         rte_stats_strings[idx].name,
2574                         sizeof(xstats_names[0].name));
2575                 cnt_used_entries++;
2576         }
2577         num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2578         for (id_queue = 0; id_queue < num_q; id_queue++) {
2579                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
2580                         snprintf(xstats_names[cnt_used_entries].name,
2581                                 sizeof(xstats_names[0].name),
2582                                 "rx_q%u_%s",
2583                                 id_queue, rte_rxq_stats_strings[idx].name);
2584                         cnt_used_entries++;
2585                 }
2586
2587         }
2588         num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2589         for (id_queue = 0; id_queue < num_q; id_queue++) {
2590                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
2591                         snprintf(xstats_names[cnt_used_entries].name,
2592                                 sizeof(xstats_names[0].name),
2593                                 "tx_q%u_%s",
2594                                 id_queue, rte_txq_stats_strings[idx].name);
2595                         cnt_used_entries++;
2596                 }
2597         }
2598         return cnt_used_entries;
2599 }
2600
2601 /* retrieve ethdev extended statistics names */
2602 int
2603 rte_eth_xstats_get_names_by_id(uint16_t port_id,
2604         struct rte_eth_xstat_name *xstats_names, unsigned int size,
2605         uint64_t *ids)
2606 {
2607         struct rte_eth_xstat_name *xstats_names_copy;
2608         unsigned int no_basic_stat_requested = 1;
2609         unsigned int no_ext_stat_requested = 1;
2610         unsigned int expected_entries;
2611         unsigned int basic_count;
2612         struct rte_eth_dev *dev;
2613         unsigned int i;
2614         int ret;
2615
2616         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2617         dev = &rte_eth_devices[port_id];
2618
2619         basic_count = get_xstats_basic_count(dev);
2620         ret = get_xstats_count(port_id);
2621         if (ret < 0)
2622                 return ret;
2623         expected_entries = (unsigned int)ret;
2624
2625         /* Return max number of stats if no ids given */
2626         if (!ids) {
2627                 if (!xstats_names)
2628                         return expected_entries;
2629                 else if (size < expected_entries)
2630                         return expected_entries;
2631         }
2632
2633         if (ids && !xstats_names)
2634                 return -EINVAL;
2635
2636         if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
2637                 uint64_t ids_copy[size];
2638
2639                 for (i = 0; i < size; i++) {
2640                         if (ids[i] < basic_count) {
2641                                 no_basic_stat_requested = 0;
2642                                 break;
2643                         }
2644
2645                         /*
2646                          * Convert ids to xstats ids that PMD knows.
2647                          * ids known by user are basic + extended stats.
2648                          */
2649                         ids_copy[i] = ids[i] - basic_count;
2650                 }
2651
2652                 if (no_basic_stat_requested)
2653                         return (*dev->dev_ops->xstats_get_names_by_id)(dev,
2654                                         xstats_names, ids_copy, size);
2655         }
2656
2657         /* Retrieve all stats */
2658         if (!ids) {
2659                 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
2660                                 expected_entries);
2661                 if (num_stats < 0 || num_stats > (int)expected_entries)
2662                         return num_stats;
2663                 else
2664                         return expected_entries;
2665         }
2666
2667         xstats_names_copy = calloc(expected_entries,
2668                 sizeof(struct rte_eth_xstat_name));
2669
2670         if (!xstats_names_copy) {
2671                 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n");
2672                 return -ENOMEM;
2673         }
2674
2675         if (ids) {
2676                 for (i = 0; i < size; i++) {
2677                         if (ids[i] >= basic_count) {
2678                                 no_ext_stat_requested = 0;
2679                                 break;
2680                         }
2681                 }
2682         }
2683
2684         /* Fill xstats_names_copy structure */
2685         if (ids && no_ext_stat_requested) {
2686                 rte_eth_basic_stats_get_names(dev, xstats_names_copy);
2687         } else {
2688                 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
2689                         expected_entries);
2690                 if (ret < 0) {
2691                         free(xstats_names_copy);
2692                         return ret;
2693                 }
2694         }
2695
2696         /* Filter stats */
2697         for (i = 0; i < size; i++) {
2698                 if (ids[i] >= expected_entries) {
2699                         RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
2700                         free(xstats_names_copy);
2701                         return -1;
2702                 }
2703                 xstats_names[i] = xstats_names_copy[ids[i]];
2704         }
2705
2706         free(xstats_names_copy);
2707         return size;
2708 }
2709
2710 int
2711 rte_eth_xstats_get_names(uint16_t port_id,
2712         struct rte_eth_xstat_name *xstats_names,
2713         unsigned int size)
2714 {
2715         struct rte_eth_dev *dev;
2716         int cnt_used_entries;
2717         int cnt_expected_entries;
2718         int cnt_driver_entries;
2719
2720         cnt_expected_entries = get_xstats_count(port_id);
2721         if (xstats_names == NULL || cnt_expected_entries < 0 ||
2722                         (int)size < cnt_expected_entries)
2723                 return cnt_expected_entries;
2724
2725         /* port_id checked in get_xstats_count() */
2726         dev = &rte_eth_devices[port_id];
2727
2728         cnt_used_entries = rte_eth_basic_stats_get_names(
2729                 dev, xstats_names);
2730
2731         if (dev->dev_ops->xstats_get_names != NULL) {
2732                 /* If there are any driver-specific xstats, append them
2733                  * to end of list.
2734                  */
2735                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
2736                         dev,
2737                         xstats_names + cnt_used_entries,
2738                         size - cnt_used_entries);
2739                 if (cnt_driver_entries < 0)
2740                         return eth_err(port_id, cnt_driver_entries);
2741                 cnt_used_entries += cnt_driver_entries;
2742         }
2743
2744         return cnt_used_entries;
2745 }
2746
2748 static int
2749 rte_eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
2750 {
2751         struct rte_eth_dev *dev;
2752         struct rte_eth_stats eth_stats;
2753         unsigned int count = 0, i, q;
2754         uint64_t val, *stats_ptr;
2755         uint16_t nb_rxqs, nb_txqs;
2756         int ret;
2757
2758         ret = rte_eth_stats_get(port_id, &eth_stats);
2759         if (ret < 0)
2760                 return ret;
2761
2762         dev = &rte_eth_devices[port_id];
2763
2764         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2765         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2766
2767         /* global stats */
2768         for (i = 0; i < RTE_NB_STATS; i++) {
2769                 stats_ptr = RTE_PTR_ADD(&eth_stats,
2770                                         rte_stats_strings[i].offset);
2771                 val = *stats_ptr;
2772                 xstats[count++].value = val;
2773         }
2774
2775         /* per-rxq stats */
2776         for (q = 0; q < nb_rxqs; q++) {
2777                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
2778                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2779                                         rte_rxq_stats_strings[i].offset +
2780                                         q * sizeof(uint64_t));
2781                         val = *stats_ptr;
2782                         xstats[count++].value = val;
2783                 }
2784         }
2785
2786         /* per-txq stats */
2787         for (q = 0; q < nb_txqs; q++) {
2788                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
2789                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2790                                         rte_txq_stats_strings[i].offset +
2791                                         q * sizeof(uint64_t));
2792                         val = *stats_ptr;
2793                         xstats[count++].value = val;
2794                 }
2795         }
2796         return count;
2797 }
2798
2799 /* retrieve ethdev extended statistics */
2800 int
2801 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
2802                          uint64_t *values, unsigned int size)
2803 {
2804         unsigned int no_basic_stat_requested = 1;
2805         unsigned int no_ext_stat_requested = 1;
2806         unsigned int num_xstats_filled;
2807         unsigned int basic_count;
2808         uint16_t expected_entries;
2809         struct rte_eth_dev *dev;
2810         unsigned int i;
2811         int ret;
2812
2813         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2814         ret = get_xstats_count(port_id);
2815         if (ret < 0)
2816                 return ret;
2817         expected_entries = (uint16_t)ret;
2818         struct rte_eth_xstat xstats[expected_entries];
2819         dev = &rte_eth_devices[port_id];
2820         basic_count = get_xstats_basic_count(dev);
2821
2822         /* Return max number of stats if no ids given */
2823         if (!ids) {
2824                 if (!values)
2825                         return expected_entries;
2826                 else if (size < expected_entries)
2827                         return expected_entries;
2828         }
2829
2830         if (ids && !values)
2831                 return -EINVAL;
2832
2833         if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
2835                 uint64_t ids_copy[size];
2836
2837                 for (i = 0; i < size; i++) {
2838                         if (ids[i] < basic_count) {
2839                                 no_basic_stat_requested = 0;
2840                                 break;
2841                         }
2842
2843                         /*
2844                          * Convert ids to xstats ids that PMD knows.
2845                          * ids known by user are basic + extended stats.
2846                          */
2847                         ids_copy[i] = ids[i] - basic_count;
2848                 }
2849
2850                 if (no_basic_stat_requested)
2851                         return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
2852                                         values, size);
2853         }
2854
2855         if (ids) {
2856                 for (i = 0; i < size; i++) {
2857                         if (ids[i] >= basic_count) {
2858                                 no_ext_stat_requested = 0;
2859                                 break;
2860                         }
2861                 }
2862         }
2863
2864         /* Fill the xstats structure */
2865         if (ids && no_ext_stat_requested)
2866                 ret = rte_eth_basic_stats_get(port_id, xstats);
2867         else
2868                 ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
2869
2870         if (ret < 0)
2871                 return ret;
2872         num_xstats_filled = (unsigned int)ret;
2873
2874         /* Return all stats */
2875         if (!ids) {
2876                 for (i = 0; i < num_xstats_filled; i++)
2877                         values[i] = xstats[i].value;
2878                 return expected_entries;
2879         }
2880
2881         /* Filter stats */
2882         for (i = 0; i < size; i++) {
2883                 if (ids[i] >= expected_entries) {
2884                         RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
2885                         return -1;
2886                 }
2887                 values[i] = xstats[ids[i]].value;
2888         }
2889         return size;
2890 }
2891
2892 int
2893 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
2894         unsigned int n)
2895 {
2896         struct rte_eth_dev *dev;
2897         unsigned int count = 0, i;
2898         signed int xcount = 0;
2899         uint16_t nb_rxqs, nb_txqs;
2900         int ret;
2901
2902         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2903
2904         dev = &rte_eth_devices[port_id];
2905
2906         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2907         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2908
2909         /* Return generic statistics */
2910         count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
2911                 (nb_txqs * RTE_NB_TXQ_STATS);
2912
2913         /* implemented by the driver */
2914         if (dev->dev_ops->xstats_get != NULL) {
2915                 /* Retrieve the xstats from the driver at the end of the
2916                  * xstats struct.
2917                  */
2918                 xcount = (*dev->dev_ops->xstats_get)(dev,
2919                                      xstats ? xstats + count : NULL,
2920                                      (n > count) ? n - count : 0);
2921
2922                 if (xcount < 0)
2923                         return eth_err(port_id, xcount);
2924         }
2925
2926         if (n < count + xcount || xstats == NULL)
2927                 return count + xcount;
2928
2929         /* now fill the xstats structure */
2930         ret = rte_eth_basic_stats_get(port_id, xstats);
2931         if (ret < 0)
2932                 return ret;
2933         count = ret;
2934
2935         for (i = 0; i < count; i++)
2936                 xstats[i].id = i;
2937         /* add an offset to driver-specific stats */
2938         for ( ; i < count + xcount; i++)
2939                 xstats[i].id += count;
2940
2941         return count + xcount;
2942 }
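
/*
 * Usage sketch (assumes "port_id" is a valid port): the function is
 * typically called twice, first with a NULL array to learn the required
 * size, then with an array large enough to hold the result.
 *
 *	int nb = rte_eth_xstats_get(port_id, NULL, 0);
 *
 *	if (nb > 0) {
 *		struct rte_eth_xstat *xs = malloc(nb * sizeof(*xs));
 *
 *		if (xs != NULL && rte_eth_xstats_get(port_id, xs, nb) == nb) {
 *			// xs[i].id indexes the xstats name array,
 *			// xs[i].value is the counter itself
 *		}
 *		free(xs);
 *	}
 */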
2943
2944 /* reset ethdev extended statistics */
2945 int
2946 rte_eth_xstats_reset(uint16_t port_id)
2947 {
2948         struct rte_eth_dev *dev;
2949
2950         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2951         dev = &rte_eth_devices[port_id];
2952
2953         /* implemented by the driver */
2954         if (dev->dev_ops->xstats_reset != NULL)
2955                 return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev));
2956
2957         /* fallback to default */
2958         return rte_eth_stats_reset(port_id);
2959 }
2960
2961 static int
2962 set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx,
2963                 uint8_t is_rx)
2964 {
2965         struct rte_eth_dev *dev;
2966
2967         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2968
2969         dev = &rte_eth_devices[port_id];
2970
2971         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
2972
2973         if (is_rx && (queue_id >= dev->data->nb_rx_queues))
2974                 return -EINVAL;
2975
2976         if (!is_rx && (queue_id >= dev->data->nb_tx_queues))
2977                 return -EINVAL;
2978
2979         if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
2980                 return -EINVAL;
2981
2982         return (*dev->dev_ops->queue_stats_mapping_set)
2983                         (dev, queue_id, stat_idx, is_rx);
2984 }
2985
2986
2987 int
2988 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
2989                 uint8_t stat_idx)
2990 {
2991         return eth_err(port_id, set_queue_stats_mapping(port_id, tx_queue_id,
2992                                                 stat_idx, STAT_QMAP_TX));
2993 }
2994
2995
2996 int
2997 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
2998                 uint8_t stat_idx)
2999 {
3000         return eth_err(port_id, set_queue_stats_mapping(port_id, rx_queue_id,
3001                                                 stat_idx, STAT_QMAP_RX));
3002 }
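
/*
 * Usage sketch: on devices with more queues than
 * RTE_ETHDEV_QUEUE_STAT_CNTRS, map the queues of interest onto the
 * limited per-queue counters; here RX queue 2 is assumed to exist and
 * is exposed through stats counter 1.
 *
 *	rte_eth_dev_set_rx_queue_stats_mapping(port_id, 2, 1);
 */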
3003
3004 int
3005 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
3006 {
3007         struct rte_eth_dev *dev;
3008
3009         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3010         dev = &rte_eth_devices[port_id];
3011
3012         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
3013         return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
3014                                                         fw_version, fw_size));
3015 }
3016
3017 int
3018 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
3019 {
3020         struct rte_eth_dev *dev;
3021         const struct rte_eth_desc_lim lim = {
3022                 .nb_max = UINT16_MAX,
3023                 .nb_min = 0,
3024                 .nb_align = 1,
3025                 .nb_seg_max = UINT16_MAX,
3026                 .nb_mtu_seg_max = UINT16_MAX,
3027         };
3028         int diag;
3029
3030         /*
3031          * Init dev_info before the port_id check so that a caller which
3032          * ignores the return status never reads uninitialized data.
3033          */
3034         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3035         dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
3036
3037         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3038         dev = &rte_eth_devices[port_id];
3039
3040         dev_info->rx_desc_lim = lim;
3041         dev_info->tx_desc_lim = lim;
3042         dev_info->device = dev->device;
3043         dev_info->min_mtu = RTE_ETHER_MIN_MTU;
3044         dev_info->max_mtu = UINT16_MAX;
3045
3046         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
3047         diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info);
3048         if (diag != 0) {
3049                 /* Cleanup already filled in device information */
3050                 memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3051                 return eth_err(port_id, diag);
3052         }
3053
3054         /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */
3055         dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues,
3056                         RTE_MAX_QUEUES_PER_PORT);
3057         dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues,
3058                         RTE_MAX_QUEUES_PER_PORT);
3059
3060         dev_info->driver_name = dev->device->driver->name;
3061         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
3062         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
3063
3064         dev_info->dev_flags = &dev->data->dev_flags;
3065
3066         return 0;
3067 }
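
/*
 * Usage sketch: clamp the application's queue request to the
 * capabilities reported by the device ("wanted_rxq" is an assumed
 * application variable).
 *
 *	struct rte_eth_dev_info dev_info;
 *
 *	if (rte_eth_dev_info_get(port_id, &dev_info) == 0) {
 *		uint16_t nb_rxq = RTE_MIN(wanted_rxq, dev_info.max_rx_queues);
 *		// configure the port with nb_rxq RX queues
 *	}
 */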
3068
3069 int
3070 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3071                                  uint32_t *ptypes, int num)
3072 {
3073         int i, j;
3074         struct rte_eth_dev *dev;
3075         const uint32_t *all_ptypes;
3076
3077         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3078         dev = &rte_eth_devices[port_id];
3079         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
3080         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3081
3082         if (!all_ptypes)
3083                 return 0;
3084
3085         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
3086                 if (all_ptypes[i] & ptype_mask) {
3087                         if (j < num)
3088                                 ptypes[j] = all_ptypes[i];
3089                         j++;
3090                 }
3091
3092         return j;
3093 }
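
/*
 * Usage sketch: as with the xstats API, query the number of matching
 * ptypes with a NULL array first, then fetch them; RTE_PTYPE_L3_MASK is
 * one example mask.
 *
 *	int nb = rte_eth_dev_get_supported_ptypes(port_id,
 *			RTE_PTYPE_L3_MASK, NULL, 0);
 *
 *	if (nb > 0) {
 *		uint32_t ptypes[nb];
 *
 *		rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_L3_MASK,
 *				ptypes, nb);
 *	}
 */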
3094
3095 int
3096 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3097                                  uint32_t *set_ptypes, unsigned int num)
3098 {
3099         const uint32_t valid_ptype_masks[] = {
3100                 RTE_PTYPE_L2_MASK,
3101                 RTE_PTYPE_L3_MASK,
3102                 RTE_PTYPE_L4_MASK,
3103                 RTE_PTYPE_TUNNEL_MASK,
3104                 RTE_PTYPE_INNER_L2_MASK,
3105                 RTE_PTYPE_INNER_L3_MASK,
3106                 RTE_PTYPE_INNER_L4_MASK,
3107         };
3108         const uint32_t *all_ptypes;
3109         struct rte_eth_dev *dev;
3110         uint32_t unused_mask;
3111         unsigned int i, j;
3112         int ret;
3113
3114         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3115         dev = &rte_eth_devices[port_id];
3116
3117         if (num > 0 && set_ptypes == NULL)
3118                 return -EINVAL;
3119
3120         if (*dev->dev_ops->dev_supported_ptypes_get == NULL ||
3121                         *dev->dev_ops->dev_ptypes_set == NULL) {
3122                 ret = 0;
3123                 goto ptype_unknown;
3124         }
3125
3126         if (ptype_mask == 0) {
3127                 ret = (*dev->dev_ops->dev_ptypes_set)(dev,
3128                                 ptype_mask);
3129                 goto ptype_unknown;
3130         }
3131
3132         unused_mask = ptype_mask;
3133         for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) {
3134                 uint32_t mask = ptype_mask & valid_ptype_masks[i];
3135                 if (mask && mask != valid_ptype_masks[i]) {
3136                         ret = -EINVAL;
3137                         goto ptype_unknown;
3138                 }
3139                 unused_mask &= ~valid_ptype_masks[i];
3140         }
3141
3142         if (unused_mask) {
3143                 ret = -EINVAL;
3144                 goto ptype_unknown;
3145         }
3146
3147         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3148         if (all_ptypes == NULL) {
3149                 ret = 0;
3150                 goto ptype_unknown;
3151         }
3152
3153         /*
3154          * Accommodate as many set_ptypes as possible. If the supplied
3155          * set_ptypes array is insufficient, fill it partially.
3156          */
3157         for (i = 0, j = 0; set_ptypes != NULL &&
3158                                 (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) {
3159                 if (ptype_mask & all_ptypes[i]) {
3160                         if (j < num - 1) {
3161                                 set_ptypes[j] = all_ptypes[i];
3162                                 j++;
3163                                 continue;
3164                         }
3165                         break;
3166                 }
3167         }
3168
3169         if (set_ptypes != NULL && j < num)
3170                 set_ptypes[j] = RTE_PTYPE_UNKNOWN;
3171
3172         return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask);
3173
3174 ptype_unknown:
3175         if (num > 0)
3176                 set_ptypes[0] = RTE_PTYPE_UNKNOWN;
3177
3178         return ret;
3179 }
3180
3181 int
3182 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
3183 {
3184         struct rte_eth_dev *dev;
3185
3186         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3187         dev = &rte_eth_devices[port_id];
3188         rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
3189
3190         return 0;
3191 }
3192
3193 int
3194 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
3195 {
3196         struct rte_eth_dev *dev;
3197
3198         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3199
3200         dev = &rte_eth_devices[port_id];
3201         *mtu = dev->data->mtu;
3202         return 0;
3203 }
3204
3205 int
3206 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
3207 {
3208         int ret;
3209         struct rte_eth_dev_info dev_info;
3210         struct rte_eth_dev *dev;
3211
3212         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3213         dev = &rte_eth_devices[port_id];
3214         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
3215
3216         /*
3217          * Check if the device supports dev_infos_get; if it does not,
3218          * skip min_mtu/max_mtu validation here as this requires values
3219          * that are populated within the call to rte_eth_dev_info_get()
3220          * which relies on dev->dev_ops->dev_infos_get.
3221          */
3222         if (*dev->dev_ops->dev_infos_get != NULL) {
3223                 ret = rte_eth_dev_info_get(port_id, &dev_info);
3224                 if (ret != 0)
3225                         return ret;
3226
3227                 if (mtu < dev_info.min_mtu || mtu > dev_info.max_mtu)
3228                         return -EINVAL;
3229         }
3230
3231         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
3232         if (!ret)
3233                 dev->data->mtu = mtu;
3234
3235         return eth_err(port_id, ret);
3236 }
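
/*
 * Usage sketch: set a jumbo MTU and read it back; 9000 is an assumed
 * example value and must lie within the device's [min_mtu, max_mtu]
 * range for the call to succeed.
 *
 *	uint16_t mtu;
 *
 *	if (rte_eth_dev_set_mtu(port_id, 9000) == 0 &&
 *	    rte_eth_dev_get_mtu(port_id, &mtu) == 0)
 *		printf("MTU is now %u\n", mtu);
 */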
3237
3238 int
3239 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
3240 {
3241         struct rte_eth_dev *dev;
3242         int ret;
3243
3244         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3245         dev = &rte_eth_devices[port_id];
3246         if (!(dev->data->dev_conf.rxmode.offloads &
3247               DEV_RX_OFFLOAD_VLAN_FILTER)) {
3248                 RTE_ETHDEV_LOG(ERR, "Port %u: vlan-filtering disabled\n",
3249                         port_id);
3250                 return -ENOSYS;
3251         }
3252
3253         if (vlan_id > 4095) {
3254                 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n",
3255                         port_id, vlan_id);
3256                 return -EINVAL;
3257         }
3258         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
3259
3260         ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
3261         if (ret == 0) {
3262                 struct rte_vlan_filter_conf *vfc;
3263                 int vidx;
3264                 int vbit;
3265
3266                 vfc = &dev->data->vlan_filter_conf;
3267                 vidx = vlan_id / 64;
3268                 vbit = vlan_id % 64;
3269
3270                 if (on)
3271                         vfc->ids[vidx] |= UINT64_C(1) << vbit;
3272                 else
3273                         vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
3274         }
3275
3276         return eth_err(port_id, ret);
3277 }
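
/*
 * Usage sketch: DEV_RX_OFFLOAD_VLAN_FILTER must have been enabled in
 * dev_conf.rxmode.offloads when the port was configured; VLAN id 100 is
 * an assumed example value.
 *
 *	rte_eth_dev_vlan_filter(port_id, 100, 1);	// accept VLAN 100
 *	rte_eth_dev_vlan_filter(port_id, 100, 0);	// and remove it again
 */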
3278
3279 int
3280 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3281                                     int on)
3282 {
3283         struct rte_eth_dev *dev;
3284
3285         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3286         dev = &rte_eth_devices[port_id];
3287         if (rx_queue_id >= dev->data->nb_rx_queues) {
3288                 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id);
3289                 return -EINVAL;
3290         }
3291
3292         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
3293         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
3294
3295         return 0;
3296 }
3297
3298 int
3299 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
3300                                 enum rte_vlan_type vlan_type,
3301                                 uint16_t tpid)
3302 {
3303         struct rte_eth_dev *dev;
3304
3305         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3306         dev = &rte_eth_devices[port_id];
3307         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
3308
3309         return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
3310                                                                tpid));
3311 }
3312
3313 int
3314 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
3315 {
3316         struct rte_eth_dev_info dev_info;
3317         struct rte_eth_dev *dev;
3318         int ret = 0;
3319         int mask = 0;
3320         int cur, org = 0;
3321         uint64_t orig_offloads;
3322         uint64_t dev_offloads;
3323         uint64_t new_offloads;
3324
3325         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3326         dev = &rte_eth_devices[port_id];
3327
3328         /* save original values in case of failure */
3329         orig_offloads = dev->data->dev_conf.rxmode.offloads;
3330         dev_offloads = orig_offloads;
3331
3332         /* check which option changed by application */
3333         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
3334         org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
3335         if (cur != org) {
3336                 if (cur)
3337                         dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
3338                 else
3339                         dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
3340                 mask |= ETH_VLAN_STRIP_MASK;
3341         }
3342
3343         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
3344         org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
3345         if (cur != org) {
3346                 if (cur)
3347                         dev_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3348                 else
3349                         dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
3350                 mask |= ETH_VLAN_FILTER_MASK;
3351         }
3352
3353         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
3354         org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND);
3355         if (cur != org) {
3356                 if (cur)
3357                         dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
3358                 else
3359                         dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
3360                 mask |= ETH_VLAN_EXTEND_MASK;
3361         }
3362
3363         cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD);
3364         org = !!(dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP);
3365         if (cur != org) {
3366                 if (cur)
3367                         dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
3368                 else
3369                         dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
3370                 mask |= ETH_QINQ_STRIP_MASK;
3371         }
3372
3373         /* no change */
3374         if (mask == 0)
3375                 return ret;
3376
3377         ret = rte_eth_dev_info_get(port_id, &dev_info);
3378         if (ret != 0)
3379                 return ret;
3380
3381         /* Rx VLAN offloading must be within its device capabilities */
3382         if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) {
3383                 new_offloads = dev_offloads & ~orig_offloads;
3384                 RTE_ETHDEV_LOG(ERR,
3385                         "Ethdev port_id=%u requested newly added VLAN offloads "
3386                         "0x%" PRIx64 " must be within Rx offloads capabilities "
3387                         "0x%" PRIx64 " in %s()\n",
3388                         port_id, new_offloads, dev_info.rx_offload_capa,
3389                         __func__);
3390                 return -EINVAL;
3391         }
3392
3393         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
3394         dev->data->dev_conf.rxmode.offloads = dev_offloads;
3395         ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
3396         if (ret) {
3397                 /* hit an error, restore original values */
3398                 dev->data->dev_conf.rxmode.offloads = orig_offloads;
3399         }
3400
3401         return eth_err(port_id, ret);
3402 }
3403
3404 int
3405 rte_eth_dev_get_vlan_offload(uint16_t port_id)
3406 {
3407         struct rte_eth_dev *dev;
3408         uint64_t *dev_offloads;
3409         int ret = 0;
3410
3411         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3412         dev = &rte_eth_devices[port_id];
3413         dev_offloads = &dev->data->dev_conf.rxmode.offloads;
3414
3415         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
3416                 ret |= ETH_VLAN_STRIP_OFFLOAD;
3417
3418         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
3419                 ret |= ETH_VLAN_FILTER_OFFLOAD;
3420
3421         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
3422                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
3423
3424         if (*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
3425                 ret |= ETH_QINQ_STRIP_OFFLOAD;
3426
3427         return ret;
3428 }
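
/*
 * Usage sketch: read-modify-write of the VLAN offload flags, turning on
 * stripping while preserving the other bits.
 *
 *	int mask = rte_eth_dev_get_vlan_offload(port_id);
 *
 *	if (mask >= 0)
 *		rte_eth_dev_set_vlan_offload(port_id,
 *				mask | ETH_VLAN_STRIP_OFFLOAD);
 */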
3429
3430 int
3431 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
3432 {
3433         struct rte_eth_dev *dev;
3434
3435         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3436         dev = &rte_eth_devices[port_id];
3437         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
3438
3439         return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
3440 }
3441
3442 int
3443 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3444 {
3445         struct rte_eth_dev *dev;
3446
3447         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3448         dev = &rte_eth_devices[port_id];
3449         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
3450         memset(fc_conf, 0, sizeof(*fc_conf));
3451         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
3452 }
3453
3454 int
3455 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3456 {
3457         struct rte_eth_dev *dev;
3458
3459         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3460         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
3461                 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n");
3462                 return -EINVAL;
3463         }
3464
3465         dev = &rte_eth_devices[port_id];
3466         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
3467         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
3468 }
3469
3470 int
3471 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
3472                                    struct rte_eth_pfc_conf *pfc_conf)
3473 {
3474         struct rte_eth_dev *dev;
3475
3476         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3477         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
3478                 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
3479                 return -EINVAL;
3480         }
3481
3482         dev = &rte_eth_devices[port_id];
3483         /* High water, low water validation is device specific */
3484         if (*dev->dev_ops->priority_flow_ctrl_set)
3485                 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
3486                                         (dev, pfc_conf));
3487         return -ENOTSUP;
3488 }
3489
3490 static int
3491 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
3492                         uint16_t reta_size)
3493 {
3494         uint16_t i, num;
3495
3496         if (!reta_conf)
3497                 return -EINVAL;
3498
3499         num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
3500         for (i = 0; i < num; i++) {
3501                 if (reta_conf[i].mask)
3502                         return 0;
3503         }
3504
3505         return -EINVAL;
3506 }
3507
3508 static int
3509 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
3510                          uint16_t reta_size,
3511                          uint16_t max_rxq)
3512 {
3513         uint16_t i, idx, shift;
3514
3515         if (!reta_conf)
3516                 return -EINVAL;
3517
3518         if (max_rxq == 0) {
3519                 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n");
3520                 return -EINVAL;
3521         }
3522
3523         for (i = 0; i < reta_size; i++) {
3524                 idx = i / RTE_RETA_GROUP_SIZE;
3525                 shift = i % RTE_RETA_GROUP_SIZE;
3526                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
3527                         (reta_conf[idx].reta[shift] >= max_rxq)) {
3528                         RTE_ETHDEV_LOG(ERR,
3529                                 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n",
3530                                 idx, shift,
3531                                 reta_conf[idx].reta[shift], max_rxq);
3532                         return -EINVAL;
3533                 }
3534         }
3535
3536         return 0;
3537 }
3538
3539 int
3540 rte_eth_dev_rss_reta_update(uint16_t port_id,
3541                             struct rte_eth_rss_reta_entry64 *reta_conf,
3542                             uint16_t reta_size)
3543 {
3544         struct rte_eth_dev *dev;
3545         int ret;
3546
3547         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3548         /* Check mask bits */
3549         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
3550         if (ret < 0)
3551                 return ret;
3552
3553         dev = &rte_eth_devices[port_id];
3554
3555         /* Check entry value */
3556         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
3557                                 dev->data->nb_rx_queues);
3558         if (ret < 0)
3559                 return ret;
3560
3561         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
3562         return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
3563                                                              reta_size));
3564 }
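
/*
 * Usage sketch (assumes "reta_size" came from dev_info.reta_size and is
 * a multiple of RTE_RETA_GROUP_SIZE, and "nb_rxq" RX queues are
 * configured): spread the redirection table evenly over the queues.
 *
 *	struct rte_eth_rss_reta_entry64 reta_conf[reta_size / RTE_RETA_GROUP_SIZE];
 *	uint16_t i;
 *
 *	memset(reta_conf, 0, sizeof(reta_conf));
 *	for (i = 0; i < reta_size; i++) {
 *		reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
 *				UINT64_C(1) << (i % RTE_RETA_GROUP_SIZE);
 *		reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *				i % nb_rxq;
 *	}
 *	rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
 */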
3565
3566 int
3567 rte_eth_dev_rss_reta_query(uint16_t port_id,
3568                            struct rte_eth_rss_reta_entry64 *reta_conf,
3569                            uint16_t reta_size)
3570 {
3571         struct rte_eth_dev *dev;
3572         int ret;
3573
3574         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3575
3576         /* Check mask bits */
3577         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
3578         if (ret < 0)
3579                 return ret;
3580
3581         dev = &rte_eth_devices[port_id];
3582         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
3583         return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
3584                                                             reta_size));
3585 }
3586
3587 int
3588 rte_eth_dev_rss_hash_update(uint16_t port_id,
3589                             struct rte_eth_rss_conf *rss_conf)
3590 {
3591         struct rte_eth_dev *dev;
3592         struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
3593         int ret;
3594
3595         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3596
3597         ret = rte_eth_dev_info_get(port_id, &dev_info);
3598         if (ret != 0)
3599                 return ret;
3600
3601         rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf);
3602
3603         dev = &rte_eth_devices[port_id];
3604         if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
3605             dev_info.flow_type_rss_offloads) {
3606                 RTE_ETHDEV_LOG(ERR,
3607                         "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
3608                         port_id, rss_conf->rss_hf,
3609                         dev_info.flow_type_rss_offloads);
3610                 return -EINVAL;
3611         }
3612         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
3613         return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
3614                                                                  rss_conf));
3615 }
3616
3617 int
3618 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
3619                               struct rte_eth_rss_conf *rss_conf)
3620 {
3621         struct rte_eth_dev *dev;
3622
3623         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3624         dev = &rte_eth_devices[port_id];
3625         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
3626         return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
3627                                                                    rss_conf));
3628 }
3629
3630 int
3631 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
3632                                 struct rte_eth_udp_tunnel *udp_tunnel)
3633 {
3634         struct rte_eth_dev *dev;
3635
3636         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3637         if (udp_tunnel == NULL) {
3638                 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
3639                 return -EINVAL;
3640         }
3641
3642         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
3643                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
3644                 return -EINVAL;
3645         }
3646
3647         dev = &rte_eth_devices[port_id];
3648         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
3649         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
3650                                                                 udp_tunnel));
3651 }
3652
3653 int
3654 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
3655                                    struct rte_eth_udp_tunnel *udp_tunnel)
3656 {
3657         struct rte_eth_dev *dev;
3658
3659         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3660         dev = &rte_eth_devices[port_id];
3661
3662         if (udp_tunnel == NULL) {
3663                 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
3664                 return -EINVAL;
3665         }
3666
3667         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
3668                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
3669                 return -EINVAL;
3670         }
3671
3672         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
3673         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
3674                                                                 udp_tunnel));
3675 }
3676
3677 int
3678 rte_eth_led_on(uint16_t port_id)
3679 {
3680         struct rte_eth_dev *dev;
3681
3682         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3683         dev = &rte_eth_devices[port_id];
3684         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
3685         return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
3686 }
3687
3688 int
3689 rte_eth_led_off(uint16_t port_id)
3690 {
3691         struct rte_eth_dev *dev;
3692
3693         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3694         dev = &rte_eth_devices[port_id];
3695         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
3696         return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
3697 }
3698
3699 int
3700 rte_eth_fec_get_capability(uint16_t port_id,
3701                            struct rte_eth_fec_capa *speed_fec_capa,
3702                            unsigned int num)
3703 {
3704         struct rte_eth_dev *dev;
3705         int ret;
3706
3707         if (speed_fec_capa == NULL && num > 0)
3708                 return -EINVAL;
3709
3710         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3711         dev = &rte_eth_devices[port_id];
3712         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get_capability, -ENOTSUP);
3713         ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num);
3714
3715         return ret;
3716 }
3717
3718 int
3719 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa)
3720 {
3721         struct rte_eth_dev *dev;
3722
3723         if (fec_capa == NULL)
3724                 return -EINVAL;
3725
3726         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3727         dev = &rte_eth_devices[port_id];
3728         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get, -ENOTSUP);
3729         return eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa));
3730 }
3731
3732 int
3733 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa)
3734 {
3735         struct rte_eth_dev *dev;
3736
3737         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3738         dev = &rte_eth_devices[port_id];
3739         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_set, -ENOTSUP);
3740         return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa));
3741 }
3742
3743 /*
3744  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
3745  * an empty spot.
3746  */
3747 static int
3748 get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
3749 {
3750         struct rte_eth_dev_info dev_info;
3751         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3752         unsigned i;
3753         int ret;
3754
3755         ret = rte_eth_dev_info_get(port_id, &dev_info);
3756         if (ret != 0)
3757                 return -1;
3758
3759         for (i = 0; i < dev_info.max_mac_addrs; i++)
3760                 if (memcmp(addr, &dev->data->mac_addrs[i],
3761                                 RTE_ETHER_ADDR_LEN) == 0)
3762                         return i;
3763
3764         return -1;
3765 }
3766
3767 static const struct rte_ether_addr null_mac_addr;
3768
3769 int
3770 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
3771                         uint32_t pool)
3772 {
3773         struct rte_eth_dev *dev;
3774         int index;
3775         uint64_t pool_mask;
3776         int ret;
3777
3778         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3779         dev = &rte_eth_devices[port_id];
3780         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
3781
3782         if (rte_is_zero_ether_addr(addr)) {
3783                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
3784                         port_id);
3785                 return -EINVAL;
3786         }
3787         if (pool >= ETH_64_POOLS) {
3788                 RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", ETH_64_POOLS - 1);
3789                 return -EINVAL;
3790         }
3791
3792         index = get_mac_addr_index(port_id, addr);
3793         if (index < 0) {
3794                 index = get_mac_addr_index(port_id, &null_mac_addr);
3795                 if (index < 0) {
3796                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
3797                                 port_id);
3798                         return -ENOSPC;
3799                 }
3800         } else {
3801                 pool_mask = dev->data->mac_pool_sel[index];
3802
3803                 /* Check if both MAC address and pool are already there, and do nothing */
3804                 if (pool_mask & (1ULL << pool))
3805                         return 0;
3806         }
3807
3808         /* Update NIC */
3809         ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
3810
3811         if (ret == 0) {
3812                 /* Update address in NIC data structure */
3813                 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]);
3814
3815                 /* Update pool bitmap in NIC data structure */
3816                 dev->data->mac_pool_sel[index] |= (1ULL << pool);
3817         }
3818
3819         return eth_err(port_id, ret);
3820 }
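
/*
 * Usage sketch: add a secondary unicast address to pool 0; the locally
 * administered address below is an assumed example value.
 *
 *	struct rte_ether_addr addr = {
 *		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }
 *	};
 *
 *	rte_eth_dev_mac_addr_add(port_id, &addr, 0);
 */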
3821
3822 int
3823 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr)
3824 {
3825         struct rte_eth_dev *dev;
3826         int index;
3827
3828         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3829         dev = &rte_eth_devices[port_id];
3830         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
3831
3832         index = get_mac_addr_index(port_id, addr);
3833         if (index == 0) {
3834                 RTE_ETHDEV_LOG(ERR,
3835                         "Port %u: Cannot remove default MAC address\n",
3836                         port_id);
3837                 return -EADDRINUSE;
3838         } else if (index < 0)
3839                 return 0;  /* Do nothing if address wasn't found */
3840
3841         /* Update NIC */
3842         (*dev->dev_ops->mac_addr_remove)(dev, index);
3843
3844         /* Update address in NIC data structure */
3845         rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
3846
3847         /* reset pool bitmap */
3848         dev->data->mac_pool_sel[index] = 0;
3849
3850         return 0;
3851 }
3852
3853 int
3854 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr)
3855 {
3856         struct rte_eth_dev *dev;
3857         int ret;
3858
3859         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3860
3861         if (!rte_is_valid_assigned_ether_addr(addr))
3862                 return -EINVAL;
3863
3864         dev = &rte_eth_devices[port_id];
3865         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
3866
3867         ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
3868         if (ret < 0)
3869                 return ret;
3870
3871         /* Update default address in NIC data structure */
3872         rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
3873
3874         return 0;
3875 }
3876
3877
3878 /*
3879  * Returns index into the hash MAC address array of addr. Use
3880  * 00:00:00:00:00:00 to find an empty spot.
3881  */
3882 static int
3883 get_hash_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
3884 {
3885         struct rte_eth_dev_info dev_info;
3886         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3887         unsigned i;
3888         int ret;
3889
3890         ret = rte_eth_dev_info_get(port_id, &dev_info);
3891         if (ret != 0)
3892                 return -1;
3893
3894         if (!dev->data->hash_mac_addrs)
3895                 return -1;
3896
3897         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
3898                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
3899                         RTE_ETHER_ADDR_LEN) == 0)
3900                         return i;
3901
3902         return -1;
3903 }
3904
3905 int
3906 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
3907                                 uint8_t on)
3908 {
3909         int index;
3910         int ret;
3911         struct rte_eth_dev *dev;
3912
3913         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3914
3915         dev = &rte_eth_devices[port_id];
3916         if (rte_is_zero_ether_addr(addr)) {
3917                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
3918                         port_id);
3919                 return -EINVAL;
3920         }
3921
3922         index = get_hash_mac_addr_index(port_id, addr);
3923         /* Check if it's already there, and do nothing */
3924         if ((index >= 0) && on)
3925                 return 0;
3926
3927         if (index < 0) {
3928                 if (!on) {
3929                         RTE_ETHDEV_LOG(ERR,
3930                                 "Port %u: the MAC address was not set in UTA\n",
3931                                 port_id);
3932                         return -EINVAL;
3933                 }
3934
3935                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
3936                 if (index < 0) {
3937                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
3938                                 port_id);
3939                         return -ENOSPC;
3940                 }
3941         }
3942
3943         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
3944         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
3945         if (ret == 0) {
3946                 /* Update address in NIC data structure */
3947                 if (on)
3948                         rte_ether_addr_copy(addr,
3949                                         &dev->data->hash_mac_addrs[index]);
3950                 else
3951                         rte_ether_addr_copy(&null_mac_addr,
3952                                         &dev->data->hash_mac_addrs[index]);
3953         }
3954
3955         return eth_err(port_id, ret);
3956 }
3957
3958 int
3959 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
3960 {
3961         struct rte_eth_dev *dev;
3962
3963         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3964
3965         dev = &rte_eth_devices[port_id];
3966
3967         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
3968         return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
3969                                                                        on));
3970 }
3971
3972 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
3973                                         uint16_t tx_rate)
3974 {
3975         struct rte_eth_dev *dev;
3976         struct rte_eth_dev_info dev_info;
3977         struct rte_eth_link link;
3978         int ret;
3979
3980         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3981
3982         ret = rte_eth_dev_info_get(port_id, &dev_info);
3983         if (ret != 0)
3984                 return ret;
3985
3986         dev = &rte_eth_devices[port_id];
3987         link = dev->data->dev_link;
3988
3989         if (queue_idx >= dev_info.max_tx_queues) {
3990                 RTE_ETHDEV_LOG(ERR,
3991                         "Set queue rate limit: port %u: invalid queue id=%u\n",
3992                         port_id, queue_idx);
3993                 return -EINVAL;
3994         }
3995
3996         if (tx_rate > link.link_speed) {
3997                 RTE_ETHDEV_LOG(ERR,
3998                         "Set queue rate limit: invalid tx_rate=%u, bigger than link speed %u\n",
3999                         tx_rate, link.link_speed);
4000                 return -EINVAL;
4001         }
4002
4003         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
4004         return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
4005                                                         queue_idx, tx_rate));
4006 }
4007
4008 int
4009 rte_eth_mirror_rule_set(uint16_t port_id,
4010                         struct rte_eth_mirror_conf *mirror_conf,
4011                         uint8_t rule_id, uint8_t on)
4012 {
4013         struct rte_eth_dev *dev;
4014
4015         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4016         if (mirror_conf->rule_type == 0) {
4017                 RTE_ETHDEV_LOG(ERR, "Mirror rule type cannot be 0\n");
4018                 return -EINVAL;
4019         }
4020
4021         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
4022                 RTE_ETHDEV_LOG(ERR, "Invalid dst pool, pool id must be 0-%d\n",
4023                         ETH_64_POOLS - 1);
4024                 return -EINVAL;
4025         }
4026
4027         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
4028              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
4029             (mirror_conf->pool_mask == 0)) {
4030                 RTE_ETHDEV_LOG(ERR,
4031                         "Invalid mirror pool, pool mask cannot be 0\n");
4032                 return -EINVAL;
4033         }
4034
4035         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
4036             mirror_conf->vlan.vlan_mask == 0) {
4037                 RTE_ETHDEV_LOG(ERR,
4038                         "Invalid vlan mask, vlan mask cannot be 0\n");
4039                 return -EINVAL;
4040         }
4041
4042         dev = &rte_eth_devices[port_id];
4043         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
4044
4045         return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
4046                                                 mirror_conf, rule_id, on));
4047 }
4048
4049 int
4050 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
4051 {
4052         struct rte_eth_dev *dev;
4053
4054         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4055
4056         dev = &rte_eth_devices[port_id];
4057         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
4058
4059         return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev,
4060                                                                    rule_id));
4061 }
4062
4063 RTE_INIT(eth_dev_init_cb_lists)
4064 {
4065         int i;
4066
4067         for (i = 0; i < RTE_MAX_ETHPORTS; i++)
4068                 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
4069 }
4070
4071 int
4072 rte_eth_dev_callback_register(uint16_t port_id,
4073                         enum rte_eth_event_type event,
4074                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4075 {
4076         struct rte_eth_dev *dev;
4077         struct rte_eth_dev_callback *user_cb;
4078         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
4079         uint16_t last_port;
4080
4081         if (!cb_fn)
4082                 return -EINVAL;
4083
4084         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4085                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
4086                 return -EINVAL;
4087         }
4088
4089         if (port_id == RTE_ETH_ALL) {
4090                 next_port = 0;
4091                 last_port = RTE_MAX_ETHPORTS - 1;
4092         } else {
4093                 next_port = last_port = port_id;
4094         }
4095
4096         rte_spinlock_lock(&rte_eth_dev_cb_lock);
4097
4098         do {
4099                 dev = &rte_eth_devices[next_port];
4100
4101                 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
4102                         if (user_cb->cb_fn == cb_fn &&
4103                                 user_cb->cb_arg == cb_arg &&
4104                                 user_cb->event == event) {
4105                                 break;
4106                         }
4107                 }
4108
4109                 /* create a new callback. */
4110                 if (user_cb == NULL) {
4111                         user_cb = rte_zmalloc("INTR_USER_CALLBACK",
4112                                 sizeof(struct rte_eth_dev_callback), 0);
4113                         if (user_cb != NULL) {
4114                                 user_cb->cb_fn = cb_fn;
4115                                 user_cb->cb_arg = cb_arg;
4116                                 user_cb->event = event;
4117                                 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
4118                                                   user_cb, next);
4119                         } else {
4120                                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
4121                                 rte_eth_dev_callback_unregister(port_id, event,
4122                                                                 cb_fn, cb_arg);
4123                                 return -ENOMEM;
4124                         }
4125
4126                 }
4127         } while (++next_port <= last_port);
4128
4129         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
4130         return 0;
4131 }
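
/*
 * Usage sketch: register a link-status callback on every port; the
 * handler matches the rte_eth_dev_cb_fn prototype.
 *
 *	static int
 *	on_link_change(uint16_t port_id, enum rte_eth_event_type event,
 *			void *cb_arg, void *ret_param)
 *	{
 *		RTE_SET_USED(cb_arg);
 *		RTE_SET_USED(ret_param);
 *		printf("port %u: event %d\n", port_id, (int)event);
 *		return 0;
 *	}
 *
 *	rte_eth_dev_callback_register(RTE_ETH_ALL, RTE_ETH_EVENT_INTR_LSC,
 *			on_link_change, NULL);
 */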
4132
4133 int
4134 rte_eth_dev_callback_unregister(uint16_t port_id,
4135                         enum rte_eth_event_type event,
4136                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4137 {
4138         int ret;
4139         struct rte_eth_dev *dev;
4140         struct rte_eth_dev_callback *cb, *next;
4141         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
4142         uint16_t last_port;
4143
4144         if (!cb_fn)
4145                 return -EINVAL;
4146
4147         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4148                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
4149                 return -EINVAL;
4150         }
4151
4152         if (port_id == RTE_ETH_ALL) {
4153                 next_port = 0;
4154                 last_port = RTE_MAX_ETHPORTS - 1;
4155         } else {
4156                 next_port = last_port = port_id;
4157         }
4158
4159         rte_spinlock_lock(&rte_eth_dev_cb_lock);
4160
4161         do {
4162                 dev = &rte_eth_devices[next_port];
4163                 ret = 0;
4164                 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
4165                      cb = next) {
4166
4167                         next = TAILQ_NEXT(cb, next);
4168
4169                         if (cb->cb_fn != cb_fn || cb->event != event ||
4170                             (cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
4171                                 continue;
4172
4173                         /*
4174                          * if this callback is not executing right now,
4175                          * then remove it.
4176                          */
4177                         if (cb->active == 0) {
4178                                 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
4179                                 rte_free(cb);
4180                         } else {
4181                                 ret = -EAGAIN;
4182                         }
4183                 }
4184         } while (++next_port <= last_port);
4185
4186         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
4187         return ret;
4188 }
4189
4190 int
4191 rte_eth_dev_callback_process(struct rte_eth_dev *dev,
4192         enum rte_eth_event_type event, void *ret_param)
4193 {
4194         struct rte_eth_dev_callback *cb_lst;
4195         struct rte_eth_dev_callback dev_cb;
4196         int rc = 0;
4197
4198         rte_spinlock_lock(&rte_eth_dev_cb_lock);
4199         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
4200                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
4201                         continue;
4202                 dev_cb = *cb_lst;
4203                 cb_lst->active = 1;
4204                 if (ret_param != NULL)
4205                         dev_cb.ret_param = ret_param;
4206
4207                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
4208                 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
4209                                 dev_cb.cb_arg, dev_cb.ret_param);
4210                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
4211                 cb_lst->active = 0;
4212         }
4213         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
4214         return rc;
4215 }
4216
4217 void
4218 rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
4219 {
4220         if (dev == NULL)
4221                 return;
4222
4223         rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);
4224
4225         dev->state = RTE_ETH_DEV_ATTACHED;
4226 }
4227
4228 int
4229 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
4230 {
4231         uint32_t vec;
4232         struct rte_eth_dev *dev;
4233         struct rte_intr_handle *intr_handle;
4234         uint16_t qid;
4235         int rc;
4236
4237         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4238
4239         dev = &rte_eth_devices[port_id];
4240
4241         if (!dev->intr_handle) {
4242                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4243                 return -ENOTSUP;
4244         }
4245
4246         intr_handle = dev->intr_handle;
4247         if (!intr_handle->intr_vec) {
4248                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4249                 return -EPERM;
4250         }
4251
4252         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
4253                 vec = intr_handle->intr_vec[qid];
4254                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4255                 if (rc && rc != -EEXIST) {
4256                         RTE_ETHDEV_LOG(ERR,
4257                                 "p %u q %u rx ctl error op %d epfd %d vec %u\n",
4258                                 port_id, qid, op, epfd, vec);
4259                 }
4260         }
4261
4262         return 0;
4263 }
4264
4265 int
4266 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
4267 {
4268         struct rte_intr_handle *intr_handle;
4269         struct rte_eth_dev *dev;
4270         unsigned int efd_idx;
4271         uint32_t vec;
4272         int fd;
4273
4274         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
4275
4276         dev = &rte_eth_devices[port_id];
4277
4278         if (queue_id >= dev->data->nb_rx_queues) {
4279                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4280                 return -1;
4281         }
4282
4283         if (!dev->intr_handle) {
4284                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4285                 return -1;
4286         }
4287
4288         intr_handle = dev->intr_handle;
4289         if (!intr_handle->intr_vec) {
4290                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4291                 return -1;
4292         }
4293
4294         vec = intr_handle->intr_vec[queue_id];
4295         efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
4296                 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
4297         fd = intr_handle->efds[efd_idx];
4298
4299         return fd;
4300 }
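
/*
 * Usage sketch: fetch the event fd backing RX queue 0 and add it to an
 * application-owned epoll instance ("epfd" is assumed to exist).
 *
 *	struct epoll_event ev = { .events = EPOLLIN };
 *	int fd = rte_eth_dev_rx_intr_ctl_q_get_fd(port_id, 0);
 *
 *	if (fd >= 0)
 *		epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev);
 */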
4301
4302 static inline int
4303 eth_dma_mzone_name(char *name, size_t len, uint16_t port_id, uint16_t queue_id,
4304                 const char *ring_name)
4305 {
4306         return snprintf(name, len, "eth_p%d_q%d_%s",
4307                         port_id, queue_id, ring_name);
4308 }
4309
4310 const struct rte_memzone *
4311 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
4312                          uint16_t queue_id, size_t size, unsigned align,
4313                          int socket_id)
4314 {
4315         char z_name[RTE_MEMZONE_NAMESIZE];
4316         const struct rte_memzone *mz;
4317         int rc;
4318
4319         rc = eth_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
4320                         queue_id, ring_name);
4321         if (rc >= RTE_MEMZONE_NAMESIZE) {
4322                 RTE_ETHDEV_LOG(ERR, "ring name too long\n");
4323                 rte_errno = ENAMETOOLONG;
4324                 return NULL;
4325         }
4326
4327         mz = rte_memzone_lookup(z_name);
4328         if (mz) {
4329                 if ((socket_id != SOCKET_ID_ANY && socket_id != mz->socket_id) ||
4330                                 size > mz->len ||
4331                                 ((uintptr_t)mz->addr & (align - 1)) != 0) {
4332                         RTE_ETHDEV_LOG(ERR,
4333                                 "memzone %s does not match the requested attributes\n",
4334                                 mz->name);
4335                         return NULL;
4336                 }
4337
4338                 return mz;
4339         }
4340
4341         return rte_memzone_reserve_aligned(z_name, size, socket_id,
4342                         RTE_MEMZONE_IOVA_CONTIG, align);
4343 }
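
/*
 * Usage sketch (driver context): reserve or look up the descriptor ring
 * memory for a queue; "rx_ring" and "ring_size" are assumed
 * driver-chosen values.
 *
 *	const struct rte_memzone *mz;
 *
 *	mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id, ring_size,
 *			RTE_CACHE_LINE_SIZE, socket_id);
 *	if (mz == NULL)
 *		return -ENOMEM;
 */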
4344
4345 int
4346 rte_eth_dma_zone_free(const struct rte_eth_dev *dev, const char *ring_name,
4347                 uint16_t queue_id)
4348 {
4349         char z_name[RTE_MEMZONE_NAMESIZE];
4350         const struct rte_memzone *mz;
4351         int rc = 0;
4352
4353         rc = eth_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
4354                         queue_id, ring_name);
4355         if (rc >= RTE_MEMZONE_NAMESIZE) {
4356                 RTE_ETHDEV_LOG(ERR, "ring name too long\n");
4357                 return -ENAMETOOLONG;
4358         }
4359
4360         mz = rte_memzone_lookup(z_name);
4361         if (mz)
4362                 rc = rte_memzone_free(mz);
4363         else
4364                 rc = -ENOENT;
4365
4366         return rc;
4367 }
4368
4369 int
4370 rte_eth_dev_create(struct rte_device *device, const char *name,
4371         size_t priv_data_size,
4372         ethdev_bus_specific_init ethdev_bus_specific_init,
4373         void *bus_init_params,
4374         ethdev_init_t ethdev_init, void *init_params)
4375 {
4376         struct rte_eth_dev *ethdev;
4377         int retval;
4378
4379         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL);
4380
4381         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
4382                 ethdev = rte_eth_dev_allocate(name);
4383                 if (!ethdev)
4384                         return -ENODEV;
4385
4386                 if (priv_data_size) {
4387                         ethdev->data->dev_private = rte_zmalloc_socket(
4388                                 name, priv_data_size, RTE_CACHE_LINE_SIZE,
4389                                 device->numa_node);
4390
4391                         if (!ethdev->data->dev_private) {
4392                                 RTE_ETHDEV_LOG(ERR,
4393                                         "failed to allocate private data\n");
4394                                 retval = -ENOMEM;
4395                                 goto probe_failed;
4396                         }
4397                 }
4398         } else {
4399                 ethdev = rte_eth_dev_attach_secondary(name);
4400                 if (!ethdev) {
4401                         RTE_ETHDEV_LOG(ERR,
4402                                 "secondary process attach failed, ethdev doesn't exist\n");
4403                         return -ENODEV;
4404                 }
4405         }
4406
4407         ethdev->device = device;
4408
4409         if (ethdev_bus_specific_init) {
4410                 retval = ethdev_bus_specific_init(ethdev, bus_init_params);
4411                 if (retval) {
4412                         RTE_ETHDEV_LOG(ERR,
4413                                 "ethdev bus specific initialisation failed\n");
4414                         goto probe_failed;
4415                 }
4416         }
4417
4418         retval = ethdev_init(ethdev, init_params);
4419         if (retval) {
4420                 RTE_ETHDEV_LOG(ERR, "ethdev initialisation failed\n");
4421                 goto probe_failed;
4422         }
4423
4424         rte_eth_dev_probing_finish(ethdev);
4425
4426         return retval;
4427
4428 probe_failed:
4429         rte_eth_dev_release_port(ethdev);
4430         return retval;
4431 }
4432
4433 int
4434 rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
4435         ethdev_uninit_t ethdev_uninit)
4436 {
4437         int ret;
4438
4439         ethdev = rte_eth_dev_allocated(ethdev->data->name);
4440         if (!ethdev)
4441                 return -ENODEV;
4442
4443         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL);
4444
4445         ret = ethdev_uninit(ethdev);
4446         if (ret)
4447                 return ret;
4448
4449         return rte_eth_dev_release_port(ethdev);
4450 }
4451
4452 int
4453 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
4454                           int epfd, int op, void *data)
4455 {
4456         uint32_t vec;
4457         struct rte_eth_dev *dev;
4458         struct rte_intr_handle *intr_handle;
4459         int rc;
4460
4461         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4462
4463         dev = &rte_eth_devices[port_id];
4464         if (queue_id >= dev->data->nb_rx_queues) {
4465                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4466                 return -EINVAL;
4467         }
4468
4469         if (!dev->intr_handle) {
4470                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4471                 return -ENOTSUP;
4472         }
4473
4474         intr_handle = dev->intr_handle;
4475         if (!intr_handle->intr_vec) {
4476                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4477                 return -EPERM;
4478         }
4479
4480         vec = intr_handle->intr_vec[queue_id];
4481         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4482         if (rc && rc != -EEXIST) {
4483                 RTE_ETHDEV_LOG(ERR,
4484                         "p %u q %u rx ctl error op %d epfd %d vec %u\n",
4485                         port_id, queue_id, op, epfd, vec);
4486                 return rc;
4487         }
4488
4489         return 0;
4490 }
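
/*
 * Usage sketch for rte_eth_dev_rx_intr_ctl_q(): register one Rx queue's
 * interrupt with the calling thread's implicit epoll instance, then block
 * until the device signals it. Hypothetical application code.
 *
 *	struct rte_epoll_event ev;
 *
 *	if (rte_eth_dev_rx_intr_ctl_q(port_id, queue_id,
 *			RTE_EPOLL_PER_THREAD, RTE_INTR_EVENT_ADD, NULL) < 0)
 *		rte_exit(EXIT_FAILURE, "cannot arm Rx interrupt\n");
 *
 *	// returns when the queue interrupt fires, or after 100 ms
 *	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, 100);
 */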
4491
4492 int
4493 rte_eth_dev_rx_intr_enable(uint16_t port_id,
4494                            uint16_t queue_id)
4495 {
4496         struct rte_eth_dev *dev;
4497
4498         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4499
4500         dev = &rte_eth_devices[port_id];
4501
4502         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
4503         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev,
4504                                                                 queue_id));
4505 }
4506
4507 int
4508 rte_eth_dev_rx_intr_disable(uint16_t port_id,
4509                             uint16_t queue_id)
4510 {
4511         struct rte_eth_dev *dev;
4512
4513         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4514
4515         dev = &rte_eth_devices[port_id];
4516
4517         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
4518         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev,
4519                                                                 queue_id));
4520 }
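
/*
 * Usage sketch: the enable/disable pair above is normally wrapped around
 * an epoll wait to implement interrupt-mode Rx, in the spirit of the
 * l3fwd-power example. Hypothetical application code; pkts, BURST, ev
 * and quit are illustrative.
 *
 *	while (!quit) {
 *		nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, BURST);
 *		if (nb_rx == 0) {
 *			rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *			// re-check the ring to close the race with arrival
 *			if (rte_eth_rx_burst(port_id, queue_id,
 *					pkts, BURST) == 0)
 *				rte_epoll_wait(RTE_EPOLL_PER_THREAD,
 *					       &ev, 1, -1);
 *			rte_eth_dev_rx_intr_disable(port_id, queue_id);
 *			continue;
 *		}
 *		// process pkts[0..nb_rx)
 *	}
 */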
4521
4523 int
4524 rte_eth_dev_filter_supported(uint16_t port_id,
4525                              enum rte_filter_type filter_type)
4526 {
4527         struct rte_eth_dev *dev;
4528
4529         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4530
4531         dev = &rte_eth_devices[port_id];
4532         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
4533         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
4534                                 RTE_ETH_FILTER_NOP, NULL);
4535 }
4536
4537 int
4538 rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
4539                         enum rte_filter_op filter_op, void *arg)
4540 {
4541         struct rte_eth_dev *dev;
4542
4543         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4544
4545         dev = &rte_eth_devices[port_id];
4546         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
4547         return eth_err(port_id, (*dev->dev_ops->filter_ctrl)(dev, filter_type,
4548                                                              filter_op, arg));
4549 }
4550
4551 const struct rte_eth_rxtx_callback *
4552 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
4553                 rte_rx_callback_fn fn, void *user_param)
4554 {
4555 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4556         rte_errno = ENOTSUP;
4557         return NULL;
4558 #endif
4559         struct rte_eth_dev *dev;
4560
4561         /* check input parameters */
4562         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4563                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4564                 rte_errno = EINVAL;
4565                 return NULL;
4566         }
4567         dev = &rte_eth_devices[port_id];
4568         if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
4569                 rte_errno = EINVAL;
4570                 return NULL;
4571         }
4572         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4573
4574         if (cb == NULL) {
4575                 rte_errno = ENOMEM;
4576                 return NULL;
4577         }
4578
4579         cb->fn.rx = fn;
4580         cb->param = user_param;
4581
4582         rte_spinlock_lock(&rte_eth_rx_cb_lock);
4583         /* Add the callbacks in fifo order. */
4584         struct rte_eth_rxtx_callback *tail =
4585                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
4586
4587         if (!tail) {
4588                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
4589
4590         } else {
4591                 while (tail->next)
4592                         tail = tail->next;
4593                 tail->next = cb;
4594         }
4595         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
4596
4597         return cb;
4598 }
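
/*
 * Usage sketch for rte_eth_add_rx_callback(): a post-Rx hook that counts
 * received packets. The callback runs inside rte_eth_rx_burst() on the
 * caller's lcore, so it must be lock-free and cheap. Hypothetical
 * application code.
 *
 *	static uint16_t
 *	count_rx_cb(uint16_t port, uint16_t queue, struct rte_mbuf *pkts[],
 *		    uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
 *	{
 *		uint64_t *counter = user_param;
 *
 *		RTE_SET_USED(port);
 *		RTE_SET_USED(queue);
 *		RTE_SET_USED(pkts);
 *		RTE_SET_USED(max_pkts);
 *		*counter += nb_pkts;
 *		return nb_pkts;         // packets handed on to the app
 *	}
 *
 *	static uint64_t rxq0_pkts;
 *	const struct rte_eth_rxtx_callback *cb =
 *		rte_eth_add_rx_callback(port_id, 0, count_rx_cb, &rxq0_pkts);
 */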
4599
4600 const struct rte_eth_rxtx_callback *
4601 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
4602                 rte_rx_callback_fn fn, void *user_param)
4603 {
4604 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4605         rte_errno = ENOTSUP;
4606         return NULL;
4607 #endif
4608         /* check input parameters */
4609         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4610                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4611                 rte_errno = EINVAL;
4612                 return NULL;
4613         }
4614
4615         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4616
4617         if (cb == NULL) {
4618                 rte_errno = ENOMEM;
4619                 return NULL;
4620         }
4621
4622         cb->fn.rx = fn;
4623         cb->param = user_param;
4624
4625         rte_spinlock_lock(&rte_eth_rx_cb_lock);
4626         /* Add the callback at the head of the list. */
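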
4627         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
4628         rte_smp_wmb();
4629         rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
4630         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
4631
4632         return cb;
4633 }
4634
4635 const struct rte_eth_rxtx_callback *
4636 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
4637                 rte_tx_callback_fn fn, void *user_param)
4638 {
4639 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4640         rte_errno = ENOTSUP;
4641         return NULL;
4642 #endif
4643         struct rte_eth_dev *dev;
4644
4645         /* check input parameters */
4646         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4647                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
4648                 rte_errno = EINVAL;
4649                 return NULL;
4650         }
4651
4652         dev = &rte_eth_devices[port_id];
4653         if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
4654                 rte_errno = EINVAL;
4655                 return NULL;
4656         }
4657
4658         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4659
4660         if (cb == NULL) {
4661                 rte_errno = ENOMEM;
4662                 return NULL;
4663         }
4664
4665         cb->fn.tx = fn;
4666         cb->param = user_param;
4667
4668         rte_spinlock_lock(&rte_eth_tx_cb_lock);
4669         /* Add the callbacks in fifo order. */
4670         struct rte_eth_rxtx_callback *tail =
4671                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
4672
4673         if (!tail) {
4674                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
4675
4676         } else {
4677                 while (tail->next)
4678                         tail = tail->next;
4679                 tail->next = cb;
4680         }
4681         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
4682
4683         return cb;
4684 }
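
/*
 * Usage sketch for rte_eth_add_tx_callback(): a pre-Tx hook that counts
 * bytes about to be handed to the driver. Note the rte_tx_callback_fn
 * signature has no max_pkts argument, unlike the Rx variant.
 * Hypothetical application code.
 *
 *	static uint16_t
 *	count_tx_bytes_cb(uint16_t port, uint16_t queue,
 *			  struct rte_mbuf *pkts[], uint16_t nb_pkts,
 *			  void *user_param)
 *	{
 *		uint64_t *bytes = user_param;
 *		uint16_t i;
 *
 *		RTE_SET_USED(port);
 *		RTE_SET_USED(queue);
 *		for (i = 0; i < nb_pkts; i++)
 *			*bytes += pkts[i]->pkt_len;
 *		return nb_pkts;         // burst size seen by the driver
 *	}
 */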
4685
4686 int
4687 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
4688                 const struct rte_eth_rxtx_callback *user_cb)
4689 {
4690 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4691         return -ENOTSUP;
4692 #endif
4693         /* Check input parameters. */
4694         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
4695         if (user_cb == NULL ||
4696                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
4697                 return -EINVAL;
4698
4699         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4700         struct rte_eth_rxtx_callback *cb;
4701         struct rte_eth_rxtx_callback **prev_cb;
4702         int ret = -EINVAL;
4703
4704         rte_spinlock_lock(&rte_eth_rx_cb_lock);
4705         prev_cb = &dev->post_rx_burst_cbs[queue_id];
4706         for (; *prev_cb != NULL; prev_cb = &cb->next) {
4707                 cb = *prev_cb;
4708                 if (cb == user_cb) {
4709                         /* Remove the user cb from the callback list. */
4710                         *prev_cb = cb->next;
4711                         ret = 0;
4712                         break;
4713                 }
4714         }
4715         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
4716
4717         return ret;
4718 }
4719
4720 int
4721 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
4722                 const struct rte_eth_rxtx_callback *user_cb)
4723 {
4724 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4725         return -ENOTSUP;
4726 #endif
4727         /* Check input parameters. */
4728         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
4729         if (user_cb == NULL ||
4730                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
4731                 return -EINVAL;
4732
4733         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4734         int ret = -EINVAL;
4735         struct rte_eth_rxtx_callback *cb;
4736         struct rte_eth_rxtx_callback **prev_cb;
4737
4738         rte_spinlock_lock(&rte_eth_tx_cb_lock);
4739         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
4740         for (; *prev_cb != NULL; prev_cb = &cb->next) {
4741                 cb = *prev_cb;
4742                 if (cb == user_cb) {
4743                         /* Remove the user cb from the callback list. */
4744                         *prev_cb = cb->next;
4745                         ret = 0;
4746                         break;
4747                 }
4748         }
4749         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
4750
4751         return ret;
4752 }
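
/*
 * Teardown sketch: neither remove function above frees the callback, and
 * a datapath thread may still be executing it when they return. A safe
 * sequence (hypothetical application code) removes the callback first,
 * waits until no lcore can still be inside an rx/tx burst on that queue,
 * and only then releases the memory:
 *
 *	rte_eth_remove_rx_callback(port_id, queue_id, cb);
 *	...	// quiesce the datapath, e.g. rte_eal_wait_lcore(lcore_id)
 *	rte_free((void *)(uintptr_t)cb);	// cast away const
 */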
4753
4754 int
4755 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4756         struct rte_eth_rxq_info *qinfo)
4757 {
4758         struct rte_eth_dev *dev;
4759
4760         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4761
4762         if (qinfo == NULL)
4763                 return -EINVAL;
4764
4765         dev = &rte_eth_devices[port_id];
4766         if (queue_id >= dev->data->nb_rx_queues) {
4767                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4768                 return -EINVAL;
4769         }
4770
4771         if (dev->data->rx_queues == NULL ||
4772                         dev->data->rx_queues[queue_id] == NULL) {
4773                 RTE_ETHDEV_LOG(ERR,
4774                                "Rx queue %"PRIu16" of device with port_id=%"
4775                                PRIu16" has not been setup\n",
4776                                queue_id, port_id);
4777                 return -EINVAL;
4778         }
4779
4780         if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
4781                 RTE_ETHDEV_LOG(INFO,
4782                         "Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
4783                         queue_id, port_id);
4784                 return -EINVAL;
4785         }
4786
4787         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
4788
4789         memset(qinfo, 0, sizeof(*qinfo));
4790         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
4791         return 0;
4792 }
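
/*
 * Usage sketch for rte_eth_rx_queue_info_get() (hypothetical application
 * code): query a configured queue and report its ring size and mempool.
 *
 *	struct rte_eth_rxq_info qinfo;
 *
 *	if (rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo) == 0)
 *		printf("rxq %u: %u descriptors, mempool %s\n", queue_id,
 *		       qinfo.nb_desc, qinfo.mp->name);
 */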
4793
4794 int
4795 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4796         struct rte_eth_txq_info *qinfo)
4797 {
4798         struct rte_eth_dev *dev;
4799
4800         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4801
4802         if (qinfo == NULL)
4803                 return -EINVAL;
4804
4805         dev = &rte_eth_devices[port_id];
4806         if (queue_id >= dev->data->nb_tx_queues) {
4807                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
4808                 return -EINVAL;
4809         }
4810
4811         if (dev->data->tx_queues == NULL ||
4812                         dev->data->tx_queues[queue_id] == NULL) {
4813                 RTE_ETHDEV_LOG(ERR,
4814                                "Tx queue %"PRIu16" of device with port_id=%"
4815                                PRIu16" has not been setup\n",
4816                                queue_id, port_id);
4817                 return -EINVAL;
4818         }
4819
4820         if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
4821                 RTE_ETHDEV_LOG(INFO,
4822                         "Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
4823                         queue_id, port_id);
4824                 return -EINVAL;
4825         }
4826
4827         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
4828
4829         memset(qinfo, 0, sizeof(*qinfo));
4830         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
4831
4832         return 0;
4833 }
4834
4835 int
4836 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
4837                           struct rte_eth_burst_mode *mode)
4838 {
4839         struct rte_eth_dev *dev;
4840
4841         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4842
4843         if (mode == NULL)
4844                 return -EINVAL;
4845
4846         dev = &rte_eth_devices[port_id];
4847
4848         if (queue_id >= dev->data->nb_rx_queues) {
4849                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4850                 return -EINVAL;
4851         }
4852
4853         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP);
4854         memset(mode, 0, sizeof(*mode));
4855         return eth_err(port_id,
4856                        dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode));
4857 }
4858
4859 int
4860 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
4861                           struct rte_eth_burst_mode *mode)
4862 {
4863         struct rte_eth_dev *dev;
4864
4865         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4866
4867         if (mode == NULL)
4868                 return -EINVAL;
4869
4870         dev = &rte_eth_devices[port_id];
4871
4872         if (queue_id >= dev->data->nb_tx_queues) {
4873                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
4874                 return -EINVAL;
4875         }
4876
4877         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP);
4878         memset(mode, 0, sizeof(*mode));
4879         return eth_err(port_id,
4880                        dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode));
4881 }
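
/*
 * Usage sketch for the two burst-mode queries above (hypothetical
 * application code); mode.info is a free-form, driver-provided string
 * such as "Scalar" or "Vector AVX2".
 *
 *	struct rte_eth_burst_mode mode;
 *
 *	if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0)
 *		printf("rxq %u burst mode: %s\n", queue_id, mode.info);
 *	if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0)
 *		printf("txq %u burst mode: %s\n", queue_id, mode.info);
 */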
4882
4883 int
4884 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
4885                              struct rte_ether_addr *mc_addr_set,
4886                              uint32_t nb_mc_addr)
4887 {
4888         struct rte_eth_dev *dev;
4889
4890         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4891
4892         dev = &rte_eth_devices[port_id];
4893         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
4894         return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
4895                                                 mc_addr_set, nb_mc_addr));
4896 }
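
/*
 * Usage sketch for rte_eth_dev_set_mc_addr_list() (hypothetical
 * application code): the list replaces, rather than extends, the set of
 * filtered multicast addresses, so pass the complete set on every call.
 *
 *	struct rte_ether_addr mc[2] = {
 *		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } },
 *		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb } },
 *	};
 *
 *	rte_eth_dev_set_mc_addr_list(port_id, mc, RTE_DIM(mc));
 */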
4897
4898 int
4899 rte_eth_timesync_enable(uint16_t port_id)
4900 {
4901         struct rte_eth_dev *dev;
4902
4903         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4904         dev = &rte_eth_devices[port_id];
4905
4906         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
4907         return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
4908 }
4909
4910 int
4911 rte_eth_timesync_disable(uint16_t port_id)
4912 {
4913         struct rte_eth_dev *dev;
4914
4915         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4916         dev = &rte_eth_devices[port_id];
4917
4918         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
4919         return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
4920 }
4921
4922 int
4923 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
4924                                    uint32_t flags)
4925 {
4926         struct rte_eth_dev *dev;
4927
4928         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4929         dev = &rte_eth_devices[port_id];
4930
4931         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
4932         return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
4933                                 (dev, timestamp, flags));
4934 }
4935
4936 int
4937 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
4938                                    struct timespec *timestamp)
4939 {
4940         struct rte_eth_dev *dev;
4941
4942         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4943         dev = &rte_eth_devices[port_id];
4944
4945         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
4946         return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
4947                                 (dev, timestamp));
4948 }
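
/*
 * Usage sketch for the IEEE 1588 helpers (hypothetical application code,
 * after the pattern of the ptpclient example): enable timesync, transmit
 * a PTP frame with the PKT_TX_IEEE1588_TMST offload flag set, then poll
 * for the hardware Tx timestamp, which can take a moment to latch; real
 * code should bound the retries.
 *
 *	struct timespec ts;
 *
 *	rte_eth_timesync_enable(port_id);
 *	...	// m->ol_flags |= PKT_TX_IEEE1588_TMST; rte_eth_tx_burst(...)
 *	while (rte_eth_timesync_read_tx_timestamp(port_id, &ts) < 0)
 *		rte_delay_us(10);
 */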
4949
4950 int
4951 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
4952 {
4953         struct rte_eth_dev *dev;
4954
4955         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4956         dev = &rte_eth_devices[port_id];
4957
4958         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
4959         return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev,
4960                                                                       delta));
4961 }
4962
4963 int
4964 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
4965 {
4966         struct rte_eth_dev *dev;
4967
4968         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4969         dev = &rte_eth_devices[port_id];
4970
4971         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
4972         return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
4973                                                                 timestamp));
4974 }
4975
4976 int
4977 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
4978 {
4979         struct rte_eth_dev *dev;
4980
4981         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4982         dev = &rte_eth_devices[port_id];
4983
4984         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
4985         return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
4986                                                                 timestamp));
4987 }
4988
4989 int
4990 rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
4991 {
4992         struct rte_eth_dev *dev;
4993
4994         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4995         dev = &rte_eth_devices[port_id];
4996
4997         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP);
4998         return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock));
4999 }
5000
5001 int
5002 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
5003 {
5004         struct rte_eth_dev *dev;
5005
5006         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5007
5008         dev = &rte_eth_devices[port_id];
5009         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
5010         return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
5011 }
5012
5013 int
5014 rte_eth_dev_get_eeprom_length(uint16_t port_id)
5015 {
5016         struct rte_eth_dev *dev;
5017
5018         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5019
5020         dev = &rte_eth_devices[port_id];
5021         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
5022         return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
5023 }
5024
5025 int
5026 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
5027 {
5028         struct rte_eth_dev *dev;
5029
5030         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5031
5032         dev = &rte_eth_devices[port_id];
5033         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
5034         return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
5035 }
5036
5037 int
5038 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
5039 {
5040         struct rte_eth_dev *dev;
5041
5042         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5043
5044         dev = &rte_eth_devices[port_id];
5045         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
5046         return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
5047 }
5048
5049 int
5050 rte_eth_dev_get_module_info(uint16_t port_id,
5051                             struct rte_eth_dev_module_info *modinfo)
5052 {
5053         struct rte_eth_dev *dev;
5054
5055         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5056
5057         dev = &rte_eth_devices[port_id];
5058         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP);
5059         return (*dev->dev_ops->get_module_info)(dev, modinfo);
5060 }
5061
5062 int
5063 rte_eth_dev_get_module_eeprom(uint16_t port_id,
5064                               struct rte_dev_eeprom_info *info)
5065 {
5066         struct rte_eth_dev *dev;
5067
5068         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5069
5070         dev = &rte_eth_devices[port_id];
5071         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP);
5072         return (*dev->dev_ops->get_module_eeprom)(dev, info);
5073 }
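
/*
 * Usage sketch pairing the two module queries above (hypothetical
 * application code): fetch the EEPROM length first, then read the whole
 * transceiver EEPROM, as testpmd does when dumping module EEPROMs.
 *
 *	struct rte_eth_dev_module_info minfo;
 *	struct rte_dev_eeprom_info einfo;
 *
 *	if (rte_eth_dev_get_module_info(port_id, &minfo) != 0)
 *		return;
 *	einfo.offset = 0;
 *	einfo.length = minfo.eeprom_len;
 *	einfo.data = calloc(1, minfo.eeprom_len);
 *	if (einfo.data != NULL &&
 *	    rte_eth_dev_get_module_eeprom(port_id, &einfo) == 0)
 *		...	// einfo.data now holds the raw EEPROM contents
 */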
5074
5075 int
5076 rte_eth_dev_get_dcb_info(uint16_t port_id,
5077                              struct rte_eth_dcb_info *dcb_info)
5078 {
5079         struct rte_eth_dev *dev;
5080
5081         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5082
5083         dev = &rte_eth_devices[port_id];
5084         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
5085
5086         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
5087         return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
5088 }
5089
5090 int
5091 rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
5092                                     struct rte_eth_l2_tunnel_conf *l2_tunnel)
5093 {
5094         struct rte_eth_dev *dev;
5095
5096         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5097         if (l2_tunnel == NULL) {
5098                 RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n");
5099                 return -EINVAL;
5100         }
5101
5102         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
5103                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
5104                 return -EINVAL;
5105         }
5106
5107         dev = &rte_eth_devices[port_id];
5108         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
5109                                 -ENOTSUP);
5110         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev,
5111                                                                 l2_tunnel));
5112 }
5113
5114 int
5115 rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
5116                                   struct rte_eth_l2_tunnel_conf *l2_tunnel,
5117                                   uint32_t mask,
5118                                   uint8_t en)
5119 {
5120         struct rte_eth_dev *dev;
5121
5122         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5123
5124         if (l2_tunnel == NULL) {
5125                 RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n");
5126                 return -EINVAL;
5127         }
5128
5129         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
5130                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
5131                 return -EINVAL;
5132         }
5133
5134         if (mask == 0) {
5135                 RTE_ETHDEV_LOG(ERR, "Mask should have a value\n");
5136                 return -EINVAL;
5137         }
5138
5139         dev = &rte_eth_devices[port_id];
5140         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
5141                                 -ENOTSUP);
5142         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_offload_set)(dev,
5143                                                         l2_tunnel, mask, en));
5144 }
5145
5146 static void
5147 rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc,
5148                            const struct rte_eth_desc_lim *desc_lim)
5149 {
5150         if (desc_lim->nb_align != 0)
5151                 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
5152
5153         if (desc_lim->nb_max != 0)
5154                 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
5155
5156         *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
5157 }
5158
5159 int
5160 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
5161                                  uint16_t *nb_rx_desc,
5162                                  uint16_t *nb_tx_desc)
5163 {
5164         struct rte_eth_dev_info dev_info;
5165         int ret;
5166
5167         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5168
5169         ret = rte_eth_dev_info_get(port_id, &dev_info);
5170         if (ret != 0)
5171                 return ret;
5172
5173         if (nb_rx_desc != NULL)
5174                 rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
5175
5176         if (nb_tx_desc != NULL)
5177                 rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
5178
5179         return 0;
5180 }
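
/*
 * Usage sketch for rte_eth_dev_adjust_nb_rx_tx_desc() (hypothetical
 * application code; mp is an existing mbuf pool): clamp the requested
 * ring sizes to the driver's limits before setting the queues up.
 *
 *	uint16_t nb_rxd = 1024, nb_txd = 1024;
 *
 *	if (rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd) != 0)
 *		rte_exit(EXIT_FAILURE, "cannot adjust descriptor counts\n");
 *	rte_eth_rx_queue_setup(port_id, 0, nb_rxd, rte_socket_id(), NULL, mp);
 *	rte_eth_tx_queue_setup(port_id, 0, nb_txd, rte_socket_id(), NULL);
 */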
5181
5182 int
5183 rte_eth_dev_hairpin_capability_get(uint16_t port_id,
5184                                    struct rte_eth_hairpin_cap *cap)
5185 {
5186         struct rte_eth_dev *dev;
5187
5188         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5189
5190         dev = &rte_eth_devices[port_id];
5191         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP);
5192         memset(cap, 0, sizeof(*cap));
5193         return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap));
5194 }
5195
5196 int
5197 rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
5198 {
5199         if (dev->data->rx_queue_state[queue_id] ==
5200             RTE_ETH_QUEUE_STATE_HAIRPIN)
5201                 return 1;
5202         return 0;
5203 }
5204
5205 int
5206 rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
5207 {
5208         if (dev->data->tx_queue_state[queue_id] ==
5209             RTE_ETH_QUEUE_STATE_HAIRPIN)
5210                 return 1;
5211         return 0;
5212 }
5213
5214 int
5215 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
5216 {
5217         struct rte_eth_dev *dev;
5218
5219         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5220
5221         if (pool == NULL)
5222                 return -EINVAL;
5223
5224         dev = &rte_eth_devices[port_id];
5225
5226         if (*dev->dev_ops->pool_ops_supported == NULL)
5227                 return 1; /* all pools are supported */
5228
5229         return (*dev->dev_ops->pool_ops_supported)(dev, pool);
5230 }
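
/*
 * Usage sketch for rte_eth_dev_pool_ops_supported() (hypothetical
 * application code): 0 means the ops are the port's best fit, 1 means
 * they are supported, so any non-negative return allows using them.
 *
 *	if (rte_eth_dev_pool_ops_supported(port_id, "ring_mp_mc") >= 0)
 *		mp = rte_pktmbuf_pool_create_by_ops("rx_pool", 8192, 256, 0,
 *			RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id(),
 *			"ring_mp_mc");
 */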
5231
5232 /**
5233  * A set of values to describe the possible states of a switch domain.
5234  */
5235 enum rte_eth_switch_domain_state {
5236         RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
5237         RTE_ETH_SWITCH_DOMAIN_ALLOCATED
5238 };
5239
5240 /**
5241  * Array of switch domains available for allocation. Array is sized to
5242  * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
5243  * ethdev ports in a single process.
5244  */
5245 static struct rte_eth_dev_switch {
5246         enum rte_eth_switch_domain_state state;
5247 } rte_eth_switch_domains[RTE_MAX_ETHPORTS];
5248
5249 int
5250 rte_eth_switch_domain_alloc(uint16_t *domain_id)
5251 {
5252         unsigned int i;
5253
5254         *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
5255
5256         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
5257                 if (rte_eth_switch_domains[i].state ==
5258                         RTE_ETH_SWITCH_DOMAIN_UNUSED) {
5259                         rte_eth_switch_domains[i].state =
5260                                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
5261                         *domain_id = i;
5262                         return 0;
5263                 }
5264         }
5265
5266         return -ENOSPC;
5267 }
5268
5269 int
5270 rte_eth_switch_domain_free(uint16_t domain_id)
5271 {
5272         if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
5273                 domain_id >= RTE_MAX_ETHPORTS)
5274                 return -EINVAL;
5275
5276         if (rte_eth_switch_domains[domain_id].state !=
5277                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
5278                 return -EINVAL;
5279
5280         rte_eth_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;
5281
5282         return 0;
5283 }
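
/*
 * Usage sketch: a PMD that spawns port representors allocates one switch
 * domain for the physical port and all of its representors, and frees it
 * when the last of them is unplugged (hypothetical driver code).
 *
 *	uint16_t domain_id;
 *
 *	if (rte_eth_switch_domain_alloc(&domain_id) != 0)
 *		return -ENOSPC;
 *	...	// report domain_id via dev_info->switch_info.domain_id
 *		// for the port and each of its representors
 *	rte_eth_switch_domain_free(domain_id);
 */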
5284
5285 static int
5286 rte_eth_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
5287 {
5288         int state;
5289         struct rte_kvargs_pair *pair;
5290         char *letter;
5291
5292         arglist->str = strdup(str_in);
5293         if (arglist->str == NULL)
5294                 return -ENOMEM;
5295
5296         letter = arglist->str;
5297         state = 0;
5298         arglist->count = 0;
5299         pair = &arglist->pairs[0];
5300         while (1) {
5301                 switch (state) {
5302                 case 0: /* Initial */
5303                         if (*letter == '=')
5304                                 return -EINVAL;
5305                         else if (*letter == '\0')
5306                                 return 0;
5307
5308                         state = 1;
5309                         pair->key = letter;
5310                         /* fall-thru */
5311
5312                 case 1: /* Parsing key */
5313                         if (*letter == '=') {
5314                                 *letter = '\0';
5315                                 pair->value = letter + 1;
5316                                 state = 2;
5317                         } else if (*letter == ',' || *letter == '\0')
5318                                 return -EINVAL;
5319                         break;
5320
5322                 case 2: /* Parsing value */
5323                         if (*letter == '[')
5324                                 state = 3;
5325                         else if (*letter == ',') {
5326                                 *letter = '\0';
5327                                 arglist->count++;
5328                                 pair = &arglist->pairs[arglist->count];
5329                                 state = 0;
5330                         } else if (*letter == '\0') {
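                                     /* Step back one char so the outer loop
                                      * re-reads the terminator from the
                                      * "Initial" state and returns 0 after
                                      * counting this final pair.
                                      */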
5331                                 letter--;
5332                                 arglist->count++;
5333                                 pair = &arglist->pairs[arglist->count];
5334                                 state = 0;
5335                         }
5336                         break;
5337
5338                 case 3: /* Parsing list */
5339                         if (*letter == ']')
5340                                 state = 2;
5341                         else if (*letter == '\0')
5342                                 return -EINVAL;
5343                         break;
5344                 }
5345                 letter++;
5346         }
5347 }
5348
5349 int
5350 rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
5351 {
5352         struct rte_kvargs args;
5353         struct rte_kvargs_pair *pair;
5354         unsigned int i;
5355         int result = 0;
5356
5357         memset(eth_da, 0, sizeof(*eth_da));
5358
5359         result = rte_eth_devargs_tokenise(&args, dargs);
5360         if (result < 0)
5361                 goto parse_cleanup;
5362
5363         for (i = 0; i < args.count; i++) {
5364                 pair = &args.pairs[i];
5365                 if (strcmp("representor", pair->key) == 0) {
5366                         result = rte_eth_devargs_parse_list(pair->value,
5367                                 rte_eth_devargs_parse_representor_ports,
5368                                 eth_da);
5369                         if (result < 0)
5370                                 goto parse_cleanup;
5371                 }
5372         }
5373
5374 parse_cleanup:
5375         if (args.str)
5376                 free(args.str);
5377
5378         return result;
5379 }
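
/*
 * Parsing sketch: for a devargs string such as "representor=[0-3]",
 * rte_eth_devargs_parse() fills eth_da with the expanded port list
 * (hypothetical driver code).
 *
 *	struct rte_eth_devargs eth_da;
 *	uint16_t i;
 *
 *	if (rte_eth_devargs_parse("representor=[0-3]", &eth_da) < 0)
 *		return -EINVAL;
 *	for (i = 0; i < eth_da.nb_representor_ports; i++)
 *		...	// eth_da.representor_ports[i] is 0, 1, 2, 3
 */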
5380
5381 static int
5382 handle_port_list(const char *cmd __rte_unused,
5383                 const char *params __rte_unused,
5384                 struct rte_tel_data *d)
5385 {
5386         int port_id;
5387
5388         rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
5389         RTE_ETH_FOREACH_DEV(port_id)
5390                 rte_tel_data_add_array_int(d, port_id);
5391         return 0;
5392 }
5393
5394 static void
5395 add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats,
5396                 const char *stat_name)
5397 {
5398         int q;
5399         struct rte_tel_data *q_data = rte_tel_data_alloc();
             if (q_data == NULL)
                     return; /* best effort: skip this queue array on alloc failure */
5400         rte_tel_data_start_array(q_data, RTE_TEL_U64_VAL);
5401         for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++)
5402                 rte_tel_data_add_array_u64(q_data, q_stats[q]);
5403         rte_tel_data_add_dict_container(d, stat_name, q_data, 0);
5404 }
5405
5406 #define ADD_DICT_STAT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s)
5407
5408 static int
5409 handle_port_stats(const char *cmd __rte_unused,
5410                 const char *params,
5411                 struct rte_tel_data *d)
5412 {
5413         struct rte_eth_stats stats;
5414         int port_id, ret;
5415
5416         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5417                 return -1;
5418
5419         port_id = atoi(params);
5420         if (!rte_eth_dev_is_valid_port(port_id))
5421                 return -1;
5422
5423         ret = rte_eth_stats_get(port_id, &stats);
5424         if (ret < 0)
5425                 return -1;
5426
5427         rte_tel_data_start_dict(d);
5428         ADD_DICT_STAT(stats, ipackets);
5429         ADD_DICT_STAT(stats, opackets);
5430         ADD_DICT_STAT(stats, ibytes);
5431         ADD_DICT_STAT(stats, obytes);
5432         ADD_DICT_STAT(stats, imissed);
5433         ADD_DICT_STAT(stats, ierrors);
5434         ADD_DICT_STAT(stats, oerrors);
5435         ADD_DICT_STAT(stats, rx_nombuf);
5436         add_port_queue_stats(d, stats.q_ipackets, "q_ipackets");
5437         add_port_queue_stats(d, stats.q_opackets, "q_opackets");
5438         add_port_queue_stats(d, stats.q_ibytes, "q_ibytes");
5439         add_port_queue_stats(d, stats.q_obytes, "q_obytes");
5440         add_port_queue_stats(d, stats.q_errors, "q_errors");
5441
5442         return 0;
5443 }
5444
5445 static int
5446 handle_port_xstats(const char *cmd __rte_unused,
5447                 const char *params,
5448                 struct rte_tel_data *d)
5449 {
5450         struct rte_eth_xstat *eth_xstats;
5451         struct rte_eth_xstat_name *xstat_names;
5452         int port_id, num_xstats;
5453         int i, ret;
5454         char *end_param;
5455
5456         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5457                 return -1;
5458
5459         port_id = strtoul(params, &end_param, 0);
5460         if (*end_param != '\0')
5461                 RTE_ETHDEV_LOG(NOTICE,
5462                         "Extra parameters passed to ethdev telemetry command, ignoring\n");
5463         if (!rte_eth_dev_is_valid_port(port_id))
5464                 return -1;
5465
5466         num_xstats = rte_eth_xstats_get(port_id, NULL, 0);
5467         if (num_xstats < 0)
5468                 return -1;
5469
5470         /* use one malloc for both names and stats */
5471         eth_xstats = malloc((sizeof(struct rte_eth_xstat) +
5472                         sizeof(struct rte_eth_xstat_name)) * num_xstats);
5473         if (eth_xstats == NULL)
5474                 return -1;
5475         xstat_names = (void *)&eth_xstats[num_xstats];
5476
5477         ret = rte_eth_xstats_get_names(port_id, xstat_names, num_xstats);
5478         if (ret < 0 || ret > num_xstats) {
5479                 free(eth_xstats);
5480                 return -1;
5481         }
5482
5483         ret = rte_eth_xstats_get(port_id, eth_xstats, num_xstats);
5484         if (ret < 0 || ret > num_xstats) {
5485                 free(eth_xstats);
5486                 return -1;
5487         }
5488
5489         rte_tel_data_start_dict(d);
5490         for (i = 0; i < num_xstats; i++)
5491                 rte_tel_data_add_dict_u64(d, xstat_names[i].name,
5492                                 eth_xstats[i].value);
5493         return 0;
5494 }
5495
5496 static int
5497 handle_port_link_status(const char *cmd __rte_unused,
5498                 const char *params,
5499                 struct rte_tel_data *d)
5500 {
5501         static const char *status_str = "status";
5502         int ret, port_id;
5503         struct rte_eth_link link;
5504         char *end_param;
5505
5506         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5507                 return -1;
5508
5509         port_id = strtoul(params, &end_param, 0);
5510         if (*end_param != '\0')
5511                 RTE_ETHDEV_LOG(NOTICE,
5512                         "Extra parameters passed to ethdev telemetry command, ignoring\n");
5513         if (!rte_eth_dev_is_valid_port(port_id))
5514                 return -1;
5515
5516         ret = rte_eth_link_get(port_id, &link);
5517         if (ret < 0)
5518                 return -1;
5519
5520         rte_tel_data_start_dict(d);
5521         if (!link.link_status) {
5522                 rte_tel_data_add_dict_string(d, status_str, "DOWN");
5523                 return 0;
5524         }
5525         rte_tel_data_add_dict_string(d, status_str, "UP");
5526         rte_tel_data_add_dict_u64(d, "speed", link.link_speed);
5527         rte_tel_data_add_dict_string(d, "duplex",
5528                         (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
5529                                 "full-duplex" : "half-duplex");
5530         return 0;
5531 }
5532
5533 RTE_LOG_REGISTER(rte_eth_dev_logtype, lib.ethdev, INFO);
5534
5535 RTE_INIT(ethdev_init_telemetry)
5536 {
5537         rte_telemetry_register_cmd("/ethdev/list", handle_port_list,
5538                         "Returns list of available ethdev ports. Takes no parameters");
5539         rte_telemetry_register_cmd("/ethdev/stats", handle_port_stats,
5540                         "Returns the common stats for a port. Parameters: int port_id");
5541         rte_telemetry_register_cmd("/ethdev/xstats", handle_port_xstats,
5542                         "Returns the extended stats for a port. Parameters: int port_id");
5543         rte_telemetry_register_cmd("/ethdev/link_status",
5544                         handle_port_link_status,
5545                         "Returns the link status for a port. Parameters: int port_id");
5546 }
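
/*
 * The commands registered above are reachable through the telemetry
 * socket; a session with usertools/dpdk-telemetry.py looks roughly like
 * this (a single port 0 assumed to exist):
 *
 *	--> /ethdev/list
 *	{"/ethdev/list": [0]}
 *	--> /ethdev/stats,0
 *	{"/ethdev/stats": {"ipackets": 0, "opackets": 0, ...}}
 */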