ethdev: make flow API thread safe
lib/librte_ethdev/rte_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_kvargs.h>
#include <rte_class.h>
#include <rte_ether.h>
#include <rte_telemetry.h>

#include "rte_ethdev_trace.h"
#include "rte_ethdev.h"
#include "rte_ethdev_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove rx callbacks */
static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove tx callbacks */
static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for shared data allocation */
static rte_spinlock_t rte_eth_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

/* Shared memory between primary and secondary processes. */
static struct {
        uint64_t next_owner_id;
        rte_spinlock_t ownership_lock;
        struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
} *rte_eth_dev_shared_data;

static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
        {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
        {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
        {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
        {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
        {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
        {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
        {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
        {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
                rx_nombuf)},
};

#define RTE_NB_STATS RTE_DIM(rte_stats_strings)

static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
        {"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS RTE_DIM(rte_rxq_stats_strings)

static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_opackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS RTE_DIM(rte_txq_stats_strings)

#define RTE_RX_OFFLOAD_BIT2STR(_name)   \
        { DEV_RX_OFFLOAD_##_name, #_name }

static const struct {
        uint64_t offload;
        const char *name;
} rte_rx_offload_names[] = {
        RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
        RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
        RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
        RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
        RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
        RTE_RX_OFFLOAD_BIT2STR(SCATTER),
        RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
        RTE_RX_OFFLOAD_BIT2STR(SECURITY),
        RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
        RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
};

#undef RTE_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name)   \
        { DEV_TX_OFFLOAD_##_name, #_name }

static const struct {
        uint64_t offload;
        const char *name;
} rte_tx_offload_names[] = {
        RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
        RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
        RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
        RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
        RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
        RTE_TX_OFFLOAD_BIT2STR(SECURITY),
        RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
};

#undef RTE_TX_OFFLOAD_BIT2STR

/**
 * The user application callback description.
 *
 * It contains callback address to be registered by user application,
 * the pointer to the parameters for callback, and the event type.
 */
struct rte_eth_dev_callback {
        TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
        rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
        void *cb_arg;                           /**< Parameter for callback */
        void *ret_param;                        /**< Return parameter */
        enum rte_eth_event_type event;          /**< Interrupt event type */
        uint32_t active;                        /**< Callback is executing */
};

enum {
        STAT_QMAP_TX = 0,
        STAT_QMAP_RX
};

int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
        int ret;
        struct rte_devargs devargs = {.args = NULL};
        const char *bus_param_key;
        char *bus_str = NULL;
        char *cls_str = NULL;
        int str_size;

        memset(iter, 0, sizeof(*iter));

        /*
         * The devargs string may use various syntaxes:
         *   - 0000:08:00.0,representor=[1-3]
         *   - pci:0000:06:00.0,representor=[0,5]
         *   - class=eth,mac=00:11:22:33:44:55
         * A new syntax is in development (not yet supported):
         *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
         */

        /*
         * Handle the pure class filter case (i.e. without any bus-level
         * argument), coming from the future new syntax.
         * rte_devargs_parse() does not yet support the new syntax,
         * which is why this simple case is temporarily parsed here.
         */
#define iter_anybus_str "class=eth,"
        if (strncmp(devargs_str, iter_anybus_str,
                        strlen(iter_anybus_str)) == 0) {
                iter->cls_str = devargs_str + strlen(iter_anybus_str);
                goto end;
        }

        /* Split bus, device and parameters. */
        ret = rte_devargs_parse(&devargs, devargs_str);
        if (ret != 0)
                goto error;

        /*
         * Assume parameters of old syntax can match only at ethdev level.
         * Extra parameters will be ignored, thanks to "+" prefix.
         */
        str_size = strlen(devargs.args) + 2;
        cls_str = malloc(str_size);
        if (cls_str == NULL) {
                ret = -ENOMEM;
                goto error;
        }
        ret = snprintf(cls_str, str_size, "+%s", devargs.args);
        if (ret != str_size - 1) {
                ret = -EINVAL;
                goto error;
        }
        iter->cls_str = cls_str;
        free(devargs.args); /* allocated by rte_devargs_parse() */
        devargs.args = NULL;

        iter->bus = devargs.bus;
        if (iter->bus->dev_iterate == NULL) {
                ret = -ENOTSUP;
                goto error;
        }

        /* Convert bus args to new syntax for use with new API dev_iterate. */
        if (strcmp(iter->bus->name, "vdev") == 0) {
                bus_param_key = "name";
        } else if (strcmp(iter->bus->name, "pci") == 0) {
                bus_param_key = "addr";
        } else {
                ret = -ENOTSUP;
                goto error;
        }
        str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
        bus_str = malloc(str_size);
        if (bus_str == NULL) {
                ret = -ENOMEM;
                goto error;
        }
        ret = snprintf(bus_str, str_size, "%s=%s",
                        bus_param_key, devargs.name);
        if (ret != str_size - 1) {
                ret = -EINVAL;
                goto error;
        }
        iter->bus_str = bus_str;

end:
        iter->cls = rte_class_find_by_name("eth");
        return 0;

error:
        if (ret == -ENOTSUP)
                RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n",
                                iter->bus->name);
        free(devargs.args);
        free(bus_str);
        free(cls_str);
        return ret;
}

uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
        if (iter->cls == NULL) /* invalid ethdev iterator */
                return RTE_MAX_ETHPORTS;

        do { /* loop to try all matching rte_device */
                /* If not pure ethdev filter and */
                if (iter->bus != NULL &&
                                /* not in middle of rte_eth_dev iteration, */
                                iter->class_device == NULL) {
                        /* get next rte_device to try. */
                        iter->device = iter->bus->dev_iterate(
                                        iter->device, iter->bus_str, iter);
                        if (iter->device == NULL)
                                break; /* no more rte_device candidate */
                }
                /* A device matched the bus part; now check the ethdev part. */
                iter->class_device = iter->cls->dev_iterate(
                                iter->class_device, iter->cls_str, iter);
                if (iter->class_device != NULL)
                        return eth_dev_to_id(iter->class_device); /* match */
        } while (iter->bus != NULL); /* need to try next rte_device */

        /* No more ethdev port to iterate. */
        rte_eth_iterator_cleanup(iter);
        return RTE_MAX_ETHPORTS;
}

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
        if (iter->bus_str == NULL)
                return; /* nothing to free in pure class filter */
        free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
        free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
        memset(iter, 0, sizeof(*iter));
}
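
/*
 * Editor's illustration (not part of the original source): a sketch of how
 * an application can drive the iterator trio above. The devargs string is
 * an example value and error handling is elided; the public header also
 * offers RTE_ETH_FOREACH_MATCHING_DEV() wrapping this same pattern. When
 * the loop runs to exhaustion, rte_eth_iterator_next() cleans the iterator
 * up itself; only an early break requires an explicit
 * rte_eth_iterator_cleanup() call.
 *
 *     struct rte_dev_iterator iterator;
 *     uint16_t port_id;
 *
 *     if (rte_eth_iterator_init(&iterator,
 *                     "class=eth,mac=00:11:22:33:44:55") == 0)
 *             for (port_id = rte_eth_iterator_next(&iterator);
 *                  port_id != RTE_MAX_ETHPORTS;
 *                  port_id = rte_eth_iterator_next(&iterator))
 *                     printf("matched port %u\n", port_id);
 */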

uint16_t
rte_eth_find_next(uint16_t port_id)
{
        while (port_id < RTE_MAX_ETHPORTS &&
                        rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
                port_id++;

        if (port_id >= RTE_MAX_ETHPORTS)
                return RTE_MAX_ETHPORTS;

        return port_id;
}

/*
 * Macro to iterate over all valid ports for internal usage.
 * Note: RTE_ETH_FOREACH_DEV differs in that it filters out ports owned by
 * an application.
 */
#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
        for (port_id = rte_eth_find_next(0); \
             port_id < RTE_MAX_ETHPORTS; \
             port_id = rte_eth_find_next(port_id + 1))

uint16_t
rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
{
        port_id = rte_eth_find_next(port_id);
        while (port_id < RTE_MAX_ETHPORTS &&
                        rte_eth_devices[port_id].device != parent)
                port_id = rte_eth_find_next(port_id + 1);

        return port_id;
}

uint16_t
rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
        return rte_eth_find_next_of(port_id,
                        rte_eth_devices[ref_port_id].device);
}

static void
rte_eth_dev_shared_data_prepare(void)
{
        const unsigned flags = 0;
        const struct rte_memzone *mz;

        rte_spinlock_lock(&rte_eth_shared_data_lock);

        if (rte_eth_dev_shared_data == NULL) {
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        /* Allocate port data and ownership shared memory. */
                        mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
                                        sizeof(*rte_eth_dev_shared_data),
                                        rte_socket_id(), flags);
                } else
                        mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
                if (mz == NULL)
                        rte_panic("Cannot allocate ethdev shared data\n");

                rte_eth_dev_shared_data = mz->addr;
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        rte_eth_dev_shared_data->next_owner_id =
                                        RTE_ETH_DEV_NO_OWNER + 1;
                        rte_spinlock_init(&rte_eth_dev_shared_data->ownership_lock);
                        memset(rte_eth_dev_shared_data->data, 0,
                               sizeof(rte_eth_dev_shared_data->data));
                }
        }

        rte_spinlock_unlock(&rte_eth_shared_data_lock);
}
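
/*
 * Editor's note with a minimal sketch of the reserve-or-lookup pattern used
 * above, for readers unfamiliar with memzones. The zone name and "size" are
 * illustrative assumptions, not part of this file:
 *
 *     const struct rte_memzone *mz;
 *
 *     if (rte_eal_process_type() == RTE_PROC_PRIMARY)
 *             mz = rte_memzone_reserve("my_shared_zone", size,
 *                             rte_socket_id(), 0);
 *     else
 *             mz = rte_memzone_lookup("my_shared_zone");
 *
 * The primary process creates the zone once; secondary processes attach to
 * the same memory by name, which is how ethdev port data and ownership
 * records stay consistent across processes.
 */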

static bool
is_allocated(const struct rte_eth_dev *ethdev)
{
        return ethdev->data->name[0] != '\0';
}

static struct rte_eth_dev *
_rte_eth_dev_allocated(const char *name)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (rte_eth_devices[i].data != NULL &&
                    strcmp(rte_eth_devices[i].data->name, name) == 0)
                        return &rte_eth_devices[i];
        }
        return NULL;
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
        struct rte_eth_dev *ethdev;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        ethdev = _rte_eth_dev_allocated(name);

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

        return ethdev;
}

static uint16_t
rte_eth_dev_find_free_port(void)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                /* Using shared name field to find a free port. */
                if (rte_eth_dev_shared_data->data[i].name[0] == '\0') {
                        RTE_ASSERT(rte_eth_devices[i].state ==
                                   RTE_ETH_DEV_UNUSED);
                        return i;
                }
        }
        return RTE_MAX_ETHPORTS;
}

static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
{
        struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

        eth_dev->data = &rte_eth_dev_shared_data->data[port_id];

        return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
        uint16_t port_id;
        struct rte_eth_dev *eth_dev = NULL;
        size_t name_len;

        name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
        if (name_len == 0) {
                RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n");
                return NULL;
        }

        if (name_len >= RTE_ETH_NAME_MAX_LEN) {
                RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n");
                return NULL;
        }

        rte_eth_dev_shared_data_prepare();

        /* Synchronize port creation between primary and secondary threads. */
        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        if (_rte_eth_dev_allocated(name) != NULL) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethernet device with name %s already allocated\n",
                        name);
                goto unlock;
        }

        port_id = rte_eth_dev_find_free_port();
        if (port_id == RTE_MAX_ETHPORTS) {
                RTE_ETHDEV_LOG(ERR,
                        "Reached maximum number of Ethernet ports\n");
                goto unlock;
        }

        eth_dev = eth_dev_get(port_id);
        strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
        eth_dev->data->port_id = port_id;
        eth_dev->data->mtu = RTE_ETHER_MTU;
        pthread_mutex_init(&eth_dev->data->flow_ops_mutex, NULL);

unlock:
        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

        return eth_dev;
}
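
/*
 * Editor's illustration (not part of the original source): a sketch of how
 * a hypothetical PMD probe path on the primary process might use
 * rte_eth_dev_allocate(). "dummy_dev_ops" and the device name are assumed
 * names for the example only; error handling beyond the NULL check is
 * elided.
 *
 *     struct rte_eth_dev *eth_dev;
 *
 *     eth_dev = rte_eth_dev_allocate("net_dummy0");
 *     if (eth_dev == NULL)
 *             return -ENOMEM;
 *     eth_dev->dev_ops = &dummy_dev_ops;
 *     rte_eth_dev_probing_finish(eth_dev);
 */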

/*
 * Attach to a port already registered by the primary process, which
 * ensures that the same device gets the same port id in both the
 * primary and secondary process.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
        uint16_t i;
        struct rte_eth_dev *eth_dev = NULL;

        rte_eth_dev_shared_data_prepare();

        /* Synchronize port attachment to primary port creation and release. */
        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (strcmp(rte_eth_dev_shared_data->data[i].name, name) == 0)
                        break;
        }
        if (i == RTE_MAX_ETHPORTS) {
                RTE_ETHDEV_LOG(ERR,
                        "Device %s is not driven by the primary process\n",
                        name);
        } else {
                eth_dev = eth_dev_get(i);
                RTE_ASSERT(eth_dev->data->port_id == i);
        }

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return eth_dev;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
        if (eth_dev == NULL)
                return -EINVAL;

        rte_eth_dev_shared_data_prepare();

        if (eth_dev->state != RTE_ETH_DEV_UNUSED)
                rte_eth_dev_callback_process(eth_dev,
                                RTE_ETH_EVENT_DESTROY, NULL);

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        eth_dev->state = RTE_ETH_DEV_UNUSED;
        eth_dev->device = NULL;
        eth_dev->intr_handle = NULL;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                rte_free(eth_dev->data->rx_queues);
                rte_free(eth_dev->data->tx_queues);
                rte_free(eth_dev->data->mac_addrs);
                rte_free(eth_dev->data->hash_mac_addrs);
                rte_free(eth_dev->data->dev_private);
                pthread_mutex_destroy(&eth_dev->data->flow_ops_mutex);
                memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
        }

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

        return 0;
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
        if (port_id >= RTE_MAX_ETHPORTS ||
            (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
                return 0;
        else
                return 1;
}

static int
rte_eth_is_valid_owner_id(uint64_t owner_id)
{
        if (owner_id == RTE_ETH_DEV_NO_OWNER ||
            rte_eth_dev_shared_data->next_owner_id <= owner_id)
                return 0;
        return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
        port_id = rte_eth_find_next(port_id);
        while (port_id < RTE_MAX_ETHPORTS &&
                        rte_eth_devices[port_id].data->owner.id != owner_id)
                port_id = rte_eth_find_next(port_id + 1);

        return port_id;
}

int
rte_eth_dev_owner_new(uint64_t *owner_id)
{
        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        *owner_id = rte_eth_dev_shared_data->next_owner_id++;

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return 0;
}

static int
_rte_eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
                       const struct rte_eth_dev_owner *new_owner)
{
        struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
        struct rte_eth_dev_owner *port_owner;

        if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
                RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
                        port_id);
                return -ENODEV;
        }

        if (!rte_eth_is_valid_owner_id(new_owner->id) &&
            !rte_eth_is_valid_owner_id(old_owner_id)) {
                RTE_ETHDEV_LOG(ERR,
                        "Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
                        old_owner_id, new_owner->id);
                return -EINVAL;
        }

        port_owner = &rte_eth_devices[port_id].data->owner;
        if (port_owner->id != old_owner_id) {
                RTE_ETHDEV_LOG(ERR,
                        "Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
                        port_id, port_owner->name, port_owner->id);
                return -EPERM;
        }

        /* cannot truncate (same structure) */
        strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);

        port_owner->id = new_owner->id;

        RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
                port_id, new_owner->name, new_owner->id);

        return 0;
}

int
rte_eth_dev_owner_set(const uint16_t port_id,
                      const struct rte_eth_dev_owner *owner)
{
        int ret;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        ret = _rte_eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return ret;
}

int
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
        const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
                        {.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
        int ret;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        ret = _rte_eth_dev_owner_set(port_id, owner_id, &new_owner);

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return ret;
}

int
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
        uint16_t port_id;
        int ret = 0;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        if (rte_eth_is_valid_owner_id(owner_id)) {
                for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
                        if (rte_eth_devices[port_id].data->owner.id == owner_id)
                                memset(&rte_eth_devices[port_id].data->owner, 0,
                                       sizeof(struct rte_eth_dev_owner));
                RTE_ETHDEV_LOG(NOTICE,
                        "All port owners owned by %016"PRIx64" identifier have been removed\n",
                        owner_id);
        } else {
                RTE_ETHDEV_LOG(ERR,
                               "Invalid owner id=%016"PRIx64"\n",
                               owner_id);
                ret = -EINVAL;
        }

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

        return ret;
}

int
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
        int ret = 0;
        struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
                RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
                        port_id);
                ret = -ENODEV;
        } else {
                rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
        }

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return ret;
}
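
/*
 * Editor's illustration (not part of the original source): a sketch of
 * claiming a port with the ownership API above and then walking only the
 * ports we own. Port 0 and the owner name are example values; error
 * handling is elided. RTE_ETH_FOREACH_DEV_OWNED_BY() is the public macro
 * built on rte_eth_find_next_owned_by().
 *
 *     struct rte_eth_dev_owner owner = { .name = "my_app" };
 *     uint16_t port_id;
 *
 *     rte_eth_dev_owner_new(&owner.id);
 *     rte_eth_dev_owner_set(0, &owner);
 *     RTE_ETH_FOREACH_DEV_OWNED_BY(port_id, owner.id)
 *             printf("port %u is owned by %s\n", port_id, owner.name);
 */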

int
rte_eth_dev_socket_id(uint16_t port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
        return rte_eth_devices[port_id].data->numa_node;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
        return rte_eth_devices[port_id].security_ctx;
}

uint16_t
rte_eth_dev_count_avail(void)
{
        uint16_t p;
        uint16_t count;

        count = 0;

        RTE_ETH_FOREACH_DEV(p)
                count++;

        return count;
}

uint16_t
rte_eth_dev_count_total(void)
{
        uint16_t port, count = 0;

        RTE_ETH_FOREACH_VALID_DEV(port)
                count++;

        return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
        char *tmp;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        if (name == NULL) {
                RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
                return -EINVAL;
        }

        /* shouldn't check 'rte_eth_devices[i].data',
         * because it might be overwritten by VDEV PMD */
        tmp = rte_eth_dev_shared_data->data[port_id].name;
        strcpy(name, tmp);
        return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
        uint32_t pid;

        if (name == NULL) {
                RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
                return -EINVAL;
        }

        RTE_ETH_FOREACH_VALID_DEV(pid)
                if (!strcmp(name, rte_eth_dev_shared_data->data[pid].name)) {
                        *port_id = pid;
                        return 0;
                }

        return -ENODEV;
}
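
/*
 * Editor's illustration (not part of the original source): the two lookups
 * above are inverses of each other, as this minimal round-trip sketch shows.
 * Port 0 is an example value; the caller must size "name" to
 * RTE_ETH_NAME_MAX_LEN.
 *
 *     char name[RTE_ETH_NAME_MAX_LEN];
 *     uint16_t port_id;
 *
 *     if (rte_eth_dev_get_name_by_port(0, name) == 0 &&
 *         rte_eth_dev_get_port_by_name(name, &port_id) == 0)
 *             assert(port_id == 0);
 */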

static int
eth_err(uint16_t port_id, int ret)
{
        if (ret == 0)
                return 0;
        if (rte_eth_dev_is_removed(port_id))
                return -EIO;
        return ret;
}

static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_rx_queues;
        void **rxq;
        unsigned i;

        if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
                                sizeof(dev->data->rx_queues[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (dev->data->rx_queues == NULL) {
                        dev->data->nb_rx_queues = 0;
                        return -(ENOMEM);
                }
        } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);
                rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (rxq == NULL)
                        return -(ENOMEM);
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(rxq + old_nb_queues, 0,
                                sizeof(rxq[0]) * new_qs);
                }

                dev->data->rx_queues = rxq;

        } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);

                rte_free(dev->data->rx_queues);
                dev->data->rx_queues = NULL;
        }
        dev->data->nb_rx_queues = nb_queues;
        return 0;
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (!dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be started before starting any queue\n",
                        port_id);
                return -EINVAL;
        }

        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

        if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        rx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->rx_queue_start(dev,
                                                             rx_queue_id));
}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

        if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        rx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
}

int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (!dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be started before starting any queue\n",
                        port_id);
                return -EINVAL;
        }

        if (tx_queue_id >= dev->data->nb_tx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

        if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        tx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (tx_queue_id >= dev->data->nb_tx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

        if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        tx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
}
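
/*
 * Editor's illustration (not part of the original source): the per-queue
 * start/stop calls above pair with deferred-start queue setup. A queue
 * configured with rx_deferred_start is left stopped by rte_eth_dev_start()
 * and must be started explicitly. dev_info, port_id, nb_desc, socket_id and
 * mb_pool are assumed to be set up elsewhere; error handling is elided.
 *
 *     struct rte_eth_rxconf rxconf = dev_info.default_rxconf;
 *
 *     rxconf.rx_deferred_start = 1;
 *     rte_eth_rx_queue_setup(port_id, 0, nb_desc, socket_id,
 *                     &rxconf, mb_pool);
 *     rte_eth_dev_start(port_id);
 *     rte_eth_dev_rx_queue_start(port_id, 0);
 */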

static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_tx_queues;
        void **txq;
        unsigned i;

        if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
                                                   sizeof(dev->data->tx_queues[0]) * nb_queues,
                                                   RTE_CACHE_LINE_SIZE);
                if (dev->data->tx_queues == NULL) {
                        dev->data->nb_tx_queues = 0;
                        return -(ENOMEM);
                }
        } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);
                txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
                                  RTE_CACHE_LINE_SIZE);
                if (txq == NULL)
                        return -ENOMEM;
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(txq + old_nb_queues, 0,
                               sizeof(txq[0]) * new_qs);
                }

                dev->data->tx_queues = txq;

        } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);

                rte_free(dev->data->tx_queues);
                dev->data->tx_queues = NULL;
        }
        dev->data->nb_tx_queues = nb_queues;
        return 0;
}

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
        switch (speed) {
        case ETH_SPEED_NUM_10M:
                return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
        case ETH_SPEED_NUM_100M:
                return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
        case ETH_SPEED_NUM_1G:
                return ETH_LINK_SPEED_1G;
        case ETH_SPEED_NUM_2_5G:
                return ETH_LINK_SPEED_2_5G;
        case ETH_SPEED_NUM_5G:
                return ETH_LINK_SPEED_5G;
        case ETH_SPEED_NUM_10G:
                return ETH_LINK_SPEED_10G;
        case ETH_SPEED_NUM_20G:
                return ETH_LINK_SPEED_20G;
        case ETH_SPEED_NUM_25G:
                return ETH_LINK_SPEED_25G;
        case ETH_SPEED_NUM_40G:
                return ETH_LINK_SPEED_40G;
        case ETH_SPEED_NUM_50G:
                return ETH_LINK_SPEED_50G;
        case ETH_SPEED_NUM_56G:
                return ETH_LINK_SPEED_56G;
        case ETH_SPEED_NUM_100G:
                return ETH_LINK_SPEED_100G;
        case ETH_SPEED_NUM_200G:
                return ETH_LINK_SPEED_200G;
        default:
                return 0;
        }
}
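
/*
 * Editor's illustration (not part of the original source): a sketch of how
 * an application might combine the bitflag helper above with
 * ETH_LINK_SPEED_FIXED to force a fixed 10G full-duplex link in the port
 * configuration.
 *
 *     struct rte_eth_conf conf = { 0 };
 *
 *     conf.link_speeds = ETH_LINK_SPEED_FIXED |
 *             rte_eth_speed_bitflag(ETH_SPEED_NUM_10G, ETH_LINK_FULL_DUPLEX);
 */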

const char *
rte_eth_dev_rx_offload_name(uint64_t offload)
{
        const char *name = "UNKNOWN";
        unsigned int i;

        for (i = 0; i < RTE_DIM(rte_rx_offload_names); ++i) {
                if (offload == rte_rx_offload_names[i].offload) {
                        name = rte_rx_offload_names[i].name;
                        break;
                }
        }

        return name;
}

const char *
rte_eth_dev_tx_offload_name(uint64_t offload)
{
        const char *name = "UNKNOWN";
        unsigned int i;

        for (i = 0; i < RTE_DIM(rte_tx_offload_names); ++i) {
                if (offload == rte_tx_offload_names[i].offload) {
                        name = rte_tx_offload_names[i].name;
                        break;
                }
        }

        return name;
}
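
/*
 * Editor's illustration (not part of the original source): the name lookups
 * above take a single-bit flag, so a multi-bit capability mask must be
 * walked bit by bit, mirroring the loop used by validate_offloads() below.
 * dev_info is assumed to be filled in elsewhere.
 *
 *     uint64_t offloads = dev_info.rx_offload_capa;
 *
 *     while (offloads != 0) {
 *             uint64_t flag = 1ULL << __builtin_ctzll(offloads);
 *
 *             printf("%s\n", rte_eth_dev_rx_offload_name(flag));
 *             offloads &= ~flag;
 *     }
 */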

static inline int
check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
                   uint32_t max_rx_pkt_len, uint32_t dev_info_size)
{
        int ret = 0;

        if (dev_info_size == 0) {
                if (config_size != max_rx_pkt_len) {
                        RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
                                       " %u != %u is not allowed\n",
                                       port_id, config_size, max_rx_pkt_len);
                        ret = -EINVAL;
                }
        } else if (config_size > dev_info_size) {
                RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
                               "> max allowed value %u\n", port_id, config_size,
                               dev_info_size);
                ret = -EINVAL;
        } else if (config_size < RTE_ETHER_MIN_LEN) {
                RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
                               "< min allowed value %u\n", port_id, config_size,
                               (unsigned int)RTE_ETHER_MIN_LEN);
                ret = -EINVAL;
        }
        return ret;
}

/*
 * Validate offloads that are requested through rte_eth_dev_configure against
 * the offloads successfully set by the Ethernet device.
 *
 * @param port_id
 *   The port identifier of the Ethernet device.
 * @param req_offloads
 *   The offloads that have been requested through `rte_eth_dev_configure`.
 * @param set_offloads
 *   The offloads successfully set by the Ethernet device.
 * @param offload_type
 *   The offload type, i.e. the Rx/Tx string.
 * @param offload_name
 *   The function that prints the offload name.
 * @return
 *   - (0) if validation successful.
 *   - (-EINVAL) if requested offload has been silently disabled.
 */
static int
validate_offloads(uint16_t port_id, uint64_t req_offloads,
                  uint64_t set_offloads, const char *offload_type,
                  const char *(*offload_name)(uint64_t))
{
        uint64_t offloads_diff = req_offloads ^ set_offloads;
        uint64_t offload;
        int ret = 0;

        while (offloads_diff != 0) {
                /* Check if any offload is requested but not enabled. */
                offload = 1ULL << __builtin_ctzll(offloads_diff);
                if (offload & req_offloads) {
                        RTE_ETHDEV_LOG(ERR,
                                "Port %u failed to enable %s offload %s\n",
                                port_id, offload_type, offload_name(offload));
                        ret = -EINVAL;
                }

                /* Check if offload couldn't be disabled. */
                if (offload & set_offloads) {
                        RTE_ETHDEV_LOG(DEBUG,
                                "Port %u %s offload %s is not requested but enabled\n",
                                port_id, offload_type, offload_name(offload));
                }

                offloads_diff &= ~offload;
        }

        return ret;
}

int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                      const struct rte_eth_conf *dev_conf)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        struct rte_eth_conf orig_conf;
        int diag;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

        if (dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be stopped to allow configuration\n",
                        port_id);
                return -EBUSY;
        }

        /* Store original config, as rollback required on failure */
        memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));

        /*
         * Copy the dev_conf parameter into the dev structure.
         * rte_eth_dev_info_get() requires dev_conf, so copy it before the
         * dev_info get call.
         */
        if (dev_conf != &dev->data->dev_conf)
                memcpy(&dev->data->dev_conf, dev_conf,
                       sizeof(dev->data->dev_conf));

        ret = rte_eth_dev_info_get(port_id, &dev_info);
        if (ret != 0)
                goto rollback;

        /* If number of queues specified by application for both Rx and Tx is
         * zero, use driver preferred values. This cannot be done individually
         * as it is valid for either Tx or Rx (but not both) to be zero.
         * If driver does not provide any preferred values, fall back on
         * EAL defaults.
         */
        if (nb_rx_q == 0 && nb_tx_q == 0) {
                nb_rx_q = dev_info.default_rxportconf.nb_queues;
                if (nb_rx_q == 0)
                        nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
                nb_tx_q = dev_info.default_txportconf.nb_queues;
                if (nb_tx_q == 0)
                        nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
        }

        if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
                RTE_ETHDEV_LOG(ERR,
                        "Number of RX queues requested (%u) is greater than max supported (%d)\n",
                        nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
                ret = -EINVAL;
                goto rollback;
        }

        if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
                RTE_ETHDEV_LOG(ERR,
                        "Number of TX queues requested (%u) is greater than max supported (%d)\n",
                        nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
                ret = -EINVAL;
                goto rollback;
        }

        /*
         * Check that the numbers of RX and TX queues are not greater
         * than the maximum number of RX and TX queues supported by the
         * configured device.
         */
        if (nb_rx_q > dev_info.max_rx_queues) {
                RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
                        port_id, nb_rx_q, dev_info.max_rx_queues);
                ret = -EINVAL;
                goto rollback;
        }

        if (nb_tx_q > dev_info.max_tx_queues) {
                RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
                        port_id, nb_tx_q, dev_info.max_tx_queues);
                ret = -EINVAL;
                goto rollback;
        }

        /* Check that the device supports requested interrupts */
        if ((dev_conf->intr_conf.lsc == 1) &&
                        (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
                RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
                        dev->device->driver->name);
                ret = -EINVAL;
                goto rollback;
        }
        if ((dev_conf->intr_conf.rmv == 1) &&
                        (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
                RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
                        dev->device->driver->name);
                ret = -EINVAL;
                goto rollback;
        }

        /*
         * If jumbo frames are enabled, check that the maximum RX packet
         * length is supported by the configured device.
         */
        if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
                if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) {
                        RTE_ETHDEV_LOG(ERR,
                                "Ethdev port_id=%u max_rx_pkt_len %u > max valid value %u\n",
                                port_id, dev_conf->rxmode.max_rx_pkt_len,
                                dev_info.max_rx_pktlen);
                        ret = -EINVAL;
                        goto rollback;
                } else if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN) {
                        RTE_ETHDEV_LOG(ERR,
                                "Ethdev port_id=%u max_rx_pkt_len %u < min valid value %u\n",
                                port_id, dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned int)RTE_ETHER_MIN_LEN);
                        ret = -EINVAL;
                        goto rollback;
                }
        } else {
                if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN ||
                        dev_conf->rxmode.max_rx_pkt_len > RTE_ETHER_MAX_LEN)
                        /* Use default value */
                        dev->data->dev_conf.rxmode.max_rx_pkt_len =
                                                        RTE_ETHER_MAX_LEN;
        }

        /*
         * If LRO is enabled, check that the maximum aggregated packet
         * size is supported by the configured device.
         */
        if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
                if (dev_conf->rxmode.max_lro_pkt_size == 0)
                        dev->data->dev_conf.rxmode.max_lro_pkt_size =
                                dev->data->dev_conf.rxmode.max_rx_pkt_len;
                ret = check_lro_pkt_size(port_id,
                                dev->data->dev_conf.rxmode.max_lro_pkt_size,
                                dev->data->dev_conf.rxmode.max_rx_pkt_len,
                                dev_info.max_lro_pkt_size);
                if (ret != 0)
                        goto rollback;
        }

        /* Any requested offloading must be within its device capabilities */
        if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
             dev_conf->rxmode.offloads) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
                        "capabilities 0x%"PRIx64" in %s()\n",
                        port_id, dev_conf->rxmode.offloads,
                        dev_info.rx_offload_capa,
                        __func__);
                ret = -EINVAL;
                goto rollback;
        }
        if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
             dev_conf->txmode.offloads) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
                        "capabilities 0x%"PRIx64" in %s()\n",
                        port_id, dev_conf->txmode.offloads,
                        dev_info.tx_offload_capa,
                        __func__);
                ret = -EINVAL;
                goto rollback;
        }

        dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
                rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);

        /* Check that device supports requested rss hash functions. */
        if ((dev_info.flow_type_rss_offloads |
             dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
            dev_info.flow_type_rss_offloads) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
                        port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
                        dev_info.flow_type_rss_offloads);
                ret = -EINVAL;
                goto rollback;
        }

        /* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
        if (((dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) == 0) &&
            (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
                        port_id,
                        rte_eth_dev_rx_offload_name(DEV_RX_OFFLOAD_RSS_HASH));
                ret = -EINVAL;
                goto rollback;
        }

        /*
         * Setup new number of RX/TX queues and reconfigure device.
         */
        diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
        if (diag != 0) {
                RTE_ETHDEV_LOG(ERR,
                        "Port%u rte_eth_dev_rx_queue_config = %d\n",
                        port_id, diag);
                ret = diag;
                goto rollback;
        }

        diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
        if (diag != 0) {
                RTE_ETHDEV_LOG(ERR,
                        "Port%u rte_eth_dev_tx_queue_config = %d\n",
                        port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                ret = diag;
                goto rollback;
        }

        diag = (*dev->dev_ops->dev_configure)(dev);
        if (diag != 0) {
                RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
                        port_id, diag);
                ret = eth_err(port_id, diag);
                goto reset_queues;
        }

        /* Initialize Rx profiling if enabled at compilation time. */
        diag = __rte_eth_dev_profile_init(port_id, dev);
        if (diag != 0) {
                RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
                        port_id, diag);
                ret = eth_err(port_id, diag);
                goto reset_queues;
        }

        /* Validate Rx offloads. */
        diag = validate_offloads(port_id,
                        dev_conf->rxmode.offloads,
                        dev->data->dev_conf.rxmode.offloads, "Rx",
                        rte_eth_dev_rx_offload_name);
        if (diag != 0) {
                ret = diag;
                goto reset_queues;
        }

        /* Validate Tx offloads. */
        diag = validate_offloads(port_id,
                        dev_conf->txmode.offloads,
                        dev->data->dev_conf.txmode.offloads, "Tx",
                        rte_eth_dev_tx_offload_name);
        if (diag != 0) {
                ret = diag;
                goto reset_queues;
        }

        rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0);
        return 0;
reset_queues:
        rte_eth_dev_rx_queue_config(dev, 0);
        rte_eth_dev_tx_queue_config(dev, 0);
rollback:
        memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));

        rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret);
        return ret;
}
1491
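/*
 * Illustrative usage sketch (not part of this library; names prefixed
 * "example_" are hypothetical): a minimal configuration with one Rx and
 * one Tx queue and RSS hashing, assuming the driver supports the
 * requested hash functions. On failure, the original configuration is
 * restored by the rollback path above.
 */
static __rte_unused int
example_configure_port(uint16_t port_id)
{
        struct rte_eth_conf conf;

        memset(&conf, 0, sizeof(conf));
        /* Distribute received packets over queues by IP/TCP/UDP hash. */
        conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
        conf.rx_adv_conf.rss_conf.rss_hf =
                ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP;

        return rte_eth_dev_configure(port_id, 1, 1, &conf);
}
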
1492 void
1493 rte_eth_dev_internal_reset(struct rte_eth_dev *dev)
1494 {
1495         if (dev->data->dev_started) {
1496                 RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
1497                         dev->data->port_id);
1498                 return;
1499         }
1500
1501         rte_eth_dev_rx_queue_config(dev, 0);
1502         rte_eth_dev_tx_queue_config(dev, 0);
1503
1504         memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
1505 }
1506
1507 static void
1508 rte_eth_dev_mac_restore(struct rte_eth_dev *dev,
1509                         struct rte_eth_dev_info *dev_info)
1510 {
1511         struct rte_ether_addr *addr;
1512         uint16_t i;
1513         uint32_t pool = 0;
1514         uint64_t pool_mask;
1515
1516         /* replay MAC address configuration including default MAC */
1517         addr = &dev->data->mac_addrs[0];
1518         if (*dev->dev_ops->mac_addr_set != NULL)
1519                 (*dev->dev_ops->mac_addr_set)(dev, addr);
1520         else if (*dev->dev_ops->mac_addr_add != NULL)
1521                 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);
1522
1523         if (*dev->dev_ops->mac_addr_add != NULL) {
1524                 for (i = 1; i < dev_info->max_mac_addrs; i++) {
1525                         addr = &dev->data->mac_addrs[i];
1526
1527                         /* skip zero address */
1528                         if (rte_is_zero_ether_addr(addr))
1529                                 continue;
1530
1531                         pool = 0;
1532                         pool_mask = dev->data->mac_pool_sel[i];
1533
1534                         do {
1535                                 if (pool_mask & 1ULL)
1536                                         (*dev->dev_ops->mac_addr_add)(dev,
1537                                                 addr, i, pool);
1538                                 pool_mask >>= 1;
1539                                 pool++;
1540                         } while (pool_mask);
1541                 }
1542         }
1543 }
1544
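/*
 * Illustrative sketch (not part of this library; the "example_" name is
 * hypothetical): the pool_mask replayed above is built when an
 * application assigns a MAC address to a VMDq pool.
 */
static __rte_unused int
example_add_mac_to_pool(uint16_t port_id, uint32_t pool)
{
        /* A locally administered unicast address, for illustration only. */
        struct rte_ether_addr addr = {
                .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }
        };

        /* Also records the pool bit in mac_pool_sel for the replay above. */
        return rte_eth_dev_mac_addr_add(port_id, &addr, pool);
}
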
1545 static int
1546 rte_eth_dev_config_restore(struct rte_eth_dev *dev,
1547                            struct rte_eth_dev_info *dev_info, uint16_t port_id)
1548 {
1549         int ret;
1550
1551         if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
1552                 rte_eth_dev_mac_restore(dev, dev_info);
1553
1554         /* replay promiscuous configuration */
1555         /*
1556          * Use the callbacks directly: the port_id check is not needed here,
1557          * and the "same value already set" short-circuit must be bypassed.
1558          */
1559         if (rte_eth_promiscuous_get(port_id) == 1 &&
1560             *dev->dev_ops->promiscuous_enable != NULL) {
1561                 ret = eth_err(port_id,
1562                               (*dev->dev_ops->promiscuous_enable)(dev));
1563                 if (ret != 0 && ret != -ENOTSUP) {
1564                         RTE_ETHDEV_LOG(ERR,
1565                                 "Failed to enable promiscuous mode for device (port %u): %s\n",
1566                                 port_id, rte_strerror(-ret));
1567                         return ret;
1568                 }
1569         } else if (rte_eth_promiscuous_get(port_id) == 0 &&
1570                    *dev->dev_ops->promiscuous_disable != NULL) {
1571                 ret = eth_err(port_id,
1572                               (*dev->dev_ops->promiscuous_disable)(dev));
1573                 if (ret != 0 && ret != -ENOTSUP) {
1574                         RTE_ETHDEV_LOG(ERR,
1575                                 "Failed to disable promiscuous mode for device (port %u): %s\n",
1576                                 port_id, rte_strerror(-ret));
1577                         return ret;
1578                 }
1579         }
1580
1581         /* replay all multicast configuration */
1582         /*
1583          * Use the callbacks directly: the port_id check is not needed here,
1584          * and the "same value already set" short-circuit must be bypassed.
1585          */
1586         if (rte_eth_allmulticast_get(port_id) == 1 &&
1587             *dev->dev_ops->allmulticast_enable != NULL) {
1588                 ret = eth_err(port_id,
1589                               (*dev->dev_ops->allmulticast_enable)(dev));
1590                 if (ret != 0 && ret != -ENOTSUP) {
1591                         RTE_ETHDEV_LOG(ERR,
1592                                 "Failed to enable allmulticast mode for device (port %u): %s\n",
1593                                 port_id, rte_strerror(-ret));
1594                         return ret;
1595                 }
1596         } else if (rte_eth_allmulticast_get(port_id) == 0 &&
1597                    *dev->dev_ops->allmulticast_disable != NULL) {
1598                 ret = eth_err(port_id,
1599                               (*dev->dev_ops->allmulticast_disable)(dev));
1600                 if (ret != 0 && ret != -ENOTSUP) {
1601                         RTE_ETHDEV_LOG(ERR,
1602                                 "Failed to disable allmulticast mode for device (port %u): %s\n",
1603                                 port_id, rte_strerror(-ret));
1604                         return ret;
1605                 }
1606         }
1607
1608         return 0;
1609 }
1610
1611 int
1612 rte_eth_dev_start(uint16_t port_id)
1613 {
1614         struct rte_eth_dev *dev;
1615         struct rte_eth_dev_info dev_info;
1616         int diag;
1617         int ret;
1618
1619         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1620
1621         dev = &rte_eth_devices[port_id];
1622
1623         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1624
1625         if (dev->data->dev_started != 0) {
1626                 RTE_ETHDEV_LOG(INFO,
1627                         "Device with port_id=%"PRIu16" already started\n",
1628                         port_id);
1629                 return 0;
1630         }
1631
1632         ret = rte_eth_dev_info_get(port_id, &dev_info);
1633         if (ret != 0)
1634                 return ret;
1635
1636         /* Let's restore the MAC now if the device does not support live change */
1637         if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
1638                 rte_eth_dev_mac_restore(dev, &dev_info);
1639
1640         diag = (*dev->dev_ops->dev_start)(dev);
1641         if (diag == 0)
1642                 dev->data->dev_started = 1;
1643         else
1644                 return eth_err(port_id, diag);
1645
1646         ret = rte_eth_dev_config_restore(dev, &dev_info, port_id);
1647         if (ret != 0) {
1648                 RTE_ETHDEV_LOG(ERR,
1649                         "Error during restoring configuration for device (port %u): %s\n",
1650                         port_id, rte_strerror(-ret));
1651                 rte_eth_dev_stop(port_id);
1652                 return ret;
1653         }
1654
1655         if (dev->data->dev_conf.intr_conf.lsc == 0) {
1656                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
1657                 (*dev->dev_ops->link_update)(dev, 0);
1658         }
1659
1660         rte_ethdev_trace_start(port_id);
1661         return 0;
1662 }
1663
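/*
 * Illustrative usage sketch (not part of this library; the "example_"
 * name and the caller-provided mempool are hypothetical): the canonical
 * bring-up order. Queue setup must happen between configure and start;
 * a descriptor count of 0 picks the driver (or EAL) default, as
 * implemented in the queue setup functions below.
 */
static __rte_unused int
example_bring_up_port(uint16_t port_id, struct rte_mempool *mb_pool)
{
        struct rte_eth_conf conf;
        int ret;

        memset(&conf, 0, sizeof(conf));
        ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
        if (ret != 0)
                return ret;
        ret = rte_eth_rx_queue_setup(port_id, 0, 0,
                        rte_eth_dev_socket_id(port_id), NULL, mb_pool);
        if (ret != 0)
                return ret;
        ret = rte_eth_tx_queue_setup(port_id, 0, 0,
                        rte_eth_dev_socket_id(port_id), NULL);
        if (ret != 0)
                return ret;
        /* Replays MAC/promiscuous/allmulticast state, then starts the PMD. */
        return rte_eth_dev_start(port_id);
}
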
1664 void
1665 rte_eth_dev_stop(uint16_t port_id)
1666 {
1667         struct rte_eth_dev *dev;
1668
1669         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1670         dev = &rte_eth_devices[port_id];
1671
1672         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1673
1674         if (dev->data->dev_started == 0) {
1675                 RTE_ETHDEV_LOG(INFO,
1676                         "Device with port_id=%"PRIu16" already stopped\n",
1677                         port_id);
1678                 return;
1679         }
1680
1681         dev->data->dev_started = 0;
1682         (*dev->dev_ops->dev_stop)(dev);
1683         rte_ethdev_trace_stop(port_id);
1684 }
1685
1686 int
1687 rte_eth_dev_set_link_up(uint16_t port_id)
1688 {
1689         struct rte_eth_dev *dev;
1690
1691         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1692
1693         dev = &rte_eth_devices[port_id];
1694
1695         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1696         return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
1697 }
1698
1699 int
1700 rte_eth_dev_set_link_down(uint16_t port_id)
1701 {
1702         struct rte_eth_dev *dev;
1703
1704         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1705
1706         dev = &rte_eth_devices[port_id];
1707
1708         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1709         return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
1710 }
1711
1712 void
1713 rte_eth_dev_close(uint16_t port_id)
1714 {
1715         struct rte_eth_dev *dev;
1716
1717         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1718         dev = &rte_eth_devices[port_id];
1719
1720         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
1721         dev->data->dev_started = 0;
1722         (*dev->dev_ops->dev_close)(dev);
1723
1724         rte_ethdev_trace_close(port_id);
1725         rte_eth_dev_release_port(dev);
1726 }
1727
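/*
 * Illustrative sketch (not part of this library; the "example_" name is
 * hypothetical): orderly teardown. rte_eth_dev_close() releases the port,
 * so the port_id must not be used afterwards.
 */
static __rte_unused void
example_tear_down_port(uint16_t port_id)
{
        rte_eth_dev_stop(port_id);
        rte_eth_dev_close(port_id);
}
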
1728 int
1729 rte_eth_dev_reset(uint16_t port_id)
1730 {
1731         struct rte_eth_dev *dev;
1732         int ret;
1733
1734         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1735         dev = &rte_eth_devices[port_id];
1736
1737         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
1738
1739         rte_eth_dev_stop(port_id);
1740         ret = dev->dev_ops->dev_reset(dev);
1741
1742         return eth_err(port_id, ret);
1743 }
1744
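/*
 * Illustrative sketch (not part of this library; names are hypothetical):
 * a typical reaction to an RTE_ETH_EVENT_INTR_RESET event. After a
 * successful reset the port is stopped and must be reconfigured;
 * example_bring_up_port() is the sketch shown earlier.
 */
static __rte_unused int
example_recover_port(uint16_t port_id, struct rte_mempool *mb_pool)
{
        int ret;

        ret = rte_eth_dev_reset(port_id);
        if (ret != 0)
                return ret;
        return example_bring_up_port(port_id, mb_pool);
}
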
1745 int
1746 rte_eth_dev_is_removed(uint16_t port_id)
1747 {
1748         struct rte_eth_dev *dev;
1749         int ret;
1750
1751         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
1752
1753         dev = &rte_eth_devices[port_id];
1754
1755         if (dev->state == RTE_ETH_DEV_REMOVED)
1756                 return 1;
1757
1758         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);
1759
1760         ret = dev->dev_ops->is_removed(dev);
1761         if (ret != 0)
1762                 /* Device is physically removed. */
1763                 dev->state = RTE_ETH_DEV_REMOVED;
1764
1765         return ret;
1766 }
1767
1768 int
1769 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1770                        uint16_t nb_rx_desc, unsigned int socket_id,
1771                        const struct rte_eth_rxconf *rx_conf,
1772                        struct rte_mempool *mp)
1773 {
1774         int ret;
1775         uint32_t mbp_buf_size;
1776         struct rte_eth_dev *dev;
1777         struct rte_eth_dev_info dev_info;
1778         struct rte_eth_rxconf local_conf;
1779         void **rxq;
1780
1781         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1782
1783         dev = &rte_eth_devices[port_id];
1784         if (rx_queue_id >= dev->data->nb_rx_queues) {
1785                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
1786                 return -EINVAL;
1787         }
1788
1789         if (mp == NULL) {
1790                 RTE_ETHDEV_LOG(ERR, "Invalid null mempool pointer\n");
1791                 return -EINVAL;
1792         }
1793
1794         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
1795
1796         /*
1797          * Check the size of the mbuf data buffer.
1798          * This value must be provided in the private data of the memory pool.
1799          * First check that the memory pool has valid private data.
1800          */
1801         ret = rte_eth_dev_info_get(port_id, &dev_info);
1802         if (ret != 0)
1803                 return ret;
1804
1805         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
1806                 RTE_ETHDEV_LOG(ERR, "%s private_data_size %d < %d\n",
1807                         mp->name, (int)mp->private_data_size,
1808                         (int)sizeof(struct rte_pktmbuf_pool_private));
1809                 return -ENOSPC;
1810         }
1811         mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1812
1813         if (mbp_buf_size < dev_info.min_rx_bufsize + RTE_PKTMBUF_HEADROOM) {
1814                 RTE_ETHDEV_LOG(ERR,
1815                         "%s mbuf_data_room_size %d < %d (RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)=%d)\n",
1816                         mp->name, (int)mbp_buf_size,
1817                         (int)(RTE_PKTMBUF_HEADROOM + dev_info.min_rx_bufsize),
1818                         (int)RTE_PKTMBUF_HEADROOM,
1819                         (int)dev_info.min_rx_bufsize);
1820                 return -EINVAL;
1821         }
1822
1823         /* Use default specified by driver, if nb_rx_desc is zero */
1824         if (nb_rx_desc == 0) {
1825                 nb_rx_desc = dev_info.default_rxportconf.ring_size;
1826                 /* If driver default is also zero, fall back on EAL default */
1827                 if (nb_rx_desc == 0)
1828                         nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
1829         }
1830
1831         if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
1832                         nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
1833                         nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
1834
1835                 RTE_ETHDEV_LOG(ERR,
1836                         "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
1837                         nb_rx_desc, dev_info.rx_desc_lim.nb_max,
1838                         dev_info.rx_desc_lim.nb_min,
1839                         dev_info.rx_desc_lim.nb_align);
1840                 return -EINVAL;
1841         }
1842
1843         if (dev->data->dev_started &&
1844                 !(dev_info.dev_capa &
1845                         RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
1846                 return -EBUSY;
1847
1848         if (dev->data->dev_started &&
1849                 (dev->data->rx_queue_state[rx_queue_id] !=
1850                         RTE_ETH_QUEUE_STATE_STOPPED))
1851                 return -EBUSY;
1852
1853         rxq = dev->data->rx_queues;
1854         if (rxq[rx_queue_id]) {
1855                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
1856                                         -ENOTSUP);
1857                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
1858                 rxq[rx_queue_id] = NULL;
1859         }
1860
1861         if (rx_conf == NULL)
1862                 rx_conf = &dev_info.default_rxconf;
1863
1864         local_conf = *rx_conf;
1865
1866         /*
1867          * If an offload has already been enabled in
1868          * rte_eth_dev_configure(), it is enabled on all queues,
1869          * so there is no need to enable it on this queue again.
1870          * The local_conf.offloads input to the underlying PMD therefore
1871          * carries only those offloads that are requested for this queue
1872          * and not already enabled port-wide.
1873          */
1874         local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
1875
1876         /*
1877          * Offloads newly added for this queue are those not enabled in
1878          * rte_eth_dev_configure(), and they must be of a per-queue type.
1879          * A pure per-port offload can't be enabled on one queue while
1880          * disabled on another. Consequently, a pure per-port offload
1881          * can't be newly requested for a queue if it hasn't already been
1882          * enabled in rte_eth_dev_configure().
1883          */
1884         if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
1885              local_conf.offloads) {
1886                 RTE_ETHDEV_LOG(ERR,
1887                         "Ethdev port_id=%d rx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
1888                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
1889                         port_id, rx_queue_id, local_conf.offloads,
1890                         dev_info.rx_queue_offload_capa,
1891                         __func__);
1892                 return -EINVAL;
1893         }
1894
1895         /*
1896          * If LRO is enabled, check that the maximum aggregated packet
1897          * size is supported by the configured device.
1898          */
1899         if (local_conf.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
1900                 if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0)
1901                         dev->data->dev_conf.rxmode.max_lro_pkt_size =
1902                                 dev->data->dev_conf.rxmode.max_rx_pkt_len;
1903                 ret = check_lro_pkt_size(port_id,
1904                                 dev->data->dev_conf.rxmode.max_lro_pkt_size,
1905                                 dev->data->dev_conf.rxmode.max_rx_pkt_len,
1906                                 dev_info.max_lro_pkt_size);
1907                 if (ret != 0)
1908                         return ret;
1909         }
1910
1911         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
1912                                               socket_id, &local_conf, mp);
1913         if (!ret) {
1914                 if (!dev->data->min_rx_buf_size ||
1915                     dev->data->min_rx_buf_size > mbp_buf_size)
1916                         dev->data->min_rx_buf_size = mbp_buf_size;
1917         }
1918
1919         rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp,
1920                 rx_conf, ret);
1921         return eth_err(port_id, ret);
1922 }
1923
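/*
 * Illustrative sketch (not part of this library; names are hypothetical):
 * request a per-queue offload on a single Rx queue. Port-wide offloads
 * from rte_eth_dev_configure() are masked out above, so rx_conf.offloads
 * only needs to carry the queue-specific additions, and those must appear
 * in the driver's rx_queue_offload_capa.
 */
static __rte_unused int
example_setup_scattered_rxq(uint16_t port_id, uint16_t queue_id,
                struct rte_mempool *mb_pool)
{
        struct rte_eth_dev_info dev_info;
        struct rte_eth_rxconf rxconf;
        int ret;

        ret = rte_eth_dev_info_get(port_id, &dev_info);
        if (ret != 0)
                return ret;

        /* Start from the driver defaults, then add one queue offload. */
        rxconf = dev_info.default_rxconf;
        rxconf.offloads |= DEV_RX_OFFLOAD_SCATTER;

        return rte_eth_rx_queue_setup(port_id, queue_id, 0,
                        rte_eth_dev_socket_id(port_id), &rxconf, mb_pool);
}
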
1924 int
1925 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1926                                uint16_t nb_rx_desc,
1927                                const struct rte_eth_hairpin_conf *conf)
1928 {
1929         int ret;
1930         struct rte_eth_dev *dev;
1931         struct rte_eth_hairpin_cap cap;
1932         void **rxq;
1933         int i;
1934         int count;
1935
1936         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1937
1938         dev = &rte_eth_devices[port_id];
1939         if (rx_queue_id >= dev->data->nb_rx_queues) {
1940                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
1941                 return -EINVAL;
1942         }
1943         ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
1944         if (ret != 0)
1945                 return ret;
1946         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_hairpin_queue_setup,
1947                                 -ENOTSUP);
1948         /* If nb_rx_desc is zero, use the driver's max descriptor count. */
1949         if (nb_rx_desc == 0)
1950                 nb_rx_desc = cap.max_nb_desc;
1951         if (nb_rx_desc > cap.max_nb_desc) {
1952                 RTE_ETHDEV_LOG(ERR,
1953                         "Invalid value for nb_rx_desc(=%hu), should be: <= %hu\n",
1954                         nb_rx_desc, cap.max_nb_desc);
1955                 return -EINVAL;
1956         }
1957         if (conf->peer_count > cap.max_rx_2_tx) {
1958                 RTE_ETHDEV_LOG(ERR,
1959                         "Invalid value for number of peers for Rx queue(=%hu), should be: <= %hu\n",
1960                         conf->peer_count, cap.max_rx_2_tx);
1961                 return -EINVAL;
1962         }
1963         if (conf->peer_count == 0) {
1964                 RTE_ETHDEV_LOG(ERR,
1965                         "Invalid value for number of peers for Rx queue(=%hu), should be: > 0\n",
1966                         conf->peer_count);
1967                 return -EINVAL;
1968         }
1969         for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
1970              cap.max_nb_queues != UINT16_MAX; i++) {
1971                 if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
1972                         count++;
1973         }
1974         if (count > cap.max_nb_queues) {
1975                 RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues, max is %d\n",
1976                         cap.max_nb_queues);
1977                 return -EINVAL;
1978         }
1979         if (dev->data->dev_started)
1980                 return -EBUSY;
1981         rxq = dev->data->rx_queues;
1982         if (rxq[rx_queue_id] != NULL) {
1983                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
1984                                         -ENOTSUP);
1985                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
1986                 rxq[rx_queue_id] = NULL;
1987         }
1988         ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
1989                                                       nb_rx_desc, conf);
1990         if (ret == 0)
1991                 dev->data->rx_queue_state[rx_queue_id] =
1992                         RTE_ETH_QUEUE_STATE_HAIRPIN;
1993         return eth_err(port_id, ret);
1994 }
1995
1996 int
1997 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
1998                        uint16_t nb_tx_desc, unsigned int socket_id,
1999                        const struct rte_eth_txconf *tx_conf)
2000 {
2001         struct rte_eth_dev *dev;
2002         struct rte_eth_dev_info dev_info;
2003         struct rte_eth_txconf local_conf;
2004         void **txq;
2005         int ret;
2006
2007         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2008
2009         dev = &rte_eth_devices[port_id];
2010         if (tx_queue_id >= dev->data->nb_tx_queues) {
2011                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
2012                 return -EINVAL;
2013         }
2014
2015         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
2016
2017         ret = rte_eth_dev_info_get(port_id, &dev_info);
2018         if (ret != 0)
2019                 return ret;
2020
2021         /* Use default specified by driver, if nb_tx_desc is zero */
2022         if (nb_tx_desc == 0) {
2023                 nb_tx_desc = dev_info.default_txportconf.ring_size;
2024                 /* If driver default is zero, fall back on EAL default */
2025                 if (nb_tx_desc == 0)
2026                         nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
2027         }
2028         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
2029             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
2030             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
2031                 RTE_ETHDEV_LOG(ERR,
2032                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
2033                         nb_tx_desc, dev_info.tx_desc_lim.nb_max,
2034                         dev_info.tx_desc_lim.nb_min,
2035                         dev_info.tx_desc_lim.nb_align);
2036                 return -EINVAL;
2037         }
2038
2039         if (dev->data->dev_started &&
2040                 !(dev_info.dev_capa &
2041                         RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
2042                 return -EBUSY;
2043
2044         if (dev->data->dev_started &&
2045                 (dev->data->tx_queue_state[tx_queue_id] !=
2046                         RTE_ETH_QUEUE_STATE_STOPPED))
2047                 return -EBUSY;
2048
2049         txq = dev->data->tx_queues;
2050         if (txq[tx_queue_id]) {
2051                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
2052                                         -ENOTSUP);
2053                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
2054                 txq[tx_queue_id] = NULL;
2055         }
2056
2057         if (tx_conf == NULL)
2058                 tx_conf = &dev_info.default_txconf;
2059
2060         local_conf = *tx_conf;
2061
2062         /*
2063          * If an offload has already been enabled in
2064          * rte_eth_dev_configure(), it is enabled on all queues,
2065          * so there is no need to enable it on this queue again.
2066          * The local_conf.offloads input to the underlying PMD therefore
2067          * carries only those offloads that are requested for this queue
2068          * and not already enabled port-wide.
2069          */
2070         local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
2071
2072         /*
2073          * Offloads newly added for this queue are those not enabled in
2074          * rte_eth_dev_configure(), and they must be of a per-queue type.
2075          * A pure per-port offload can't be enabled on one queue while
2076          * disabled on another. Consequently, a pure per-port offload
2077          * can't be newly requested for a queue if it hasn't already been
2078          * enabled in rte_eth_dev_configure().
2079          */
2080         if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
2081              local_conf.offloads) {
2082                 RTE_ETHDEV_LOG(ERR,
2083                         "Ethdev port_id=%d tx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
2084                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2085                         port_id, tx_queue_id, local_conf.offloads,
2086                         dev_info.tx_queue_offload_capa,
2087                         __func__);
2088                 return -EINVAL;
2089         }
2090
2091         rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf);
2092         return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
2093                        tx_queue_id, nb_tx_desc, socket_id, &local_conf));
2094 }
2095
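/*
 * Illustrative sketch (not part of this library; names are hypothetical):
 * the Tx counterpart of the Rx example above. The offload is assumed to
 * be present in the driver's tx_queue_offload_capa; a pure per-port
 * offload would be rejected here, as explained above.
 */
static __rte_unused int
example_setup_txq_fast_free(uint16_t port_id, uint16_t queue_id)
{
        struct rte_eth_dev_info dev_info;
        struct rte_eth_txconf txconf;
        int ret;

        ret = rte_eth_dev_info_get(port_id, &dev_info);
        if (ret != 0)
                return ret;

        txconf = dev_info.default_txconf;
        txconf.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;

        return rte_eth_tx_queue_setup(port_id, queue_id, 0,
                        rte_eth_dev_socket_id(port_id), &txconf);
}
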
2096 int
2097 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2098                                uint16_t nb_tx_desc,
2099                                const struct rte_eth_hairpin_conf *conf)
2100 {
2101         struct rte_eth_dev *dev;
2102         struct rte_eth_hairpin_cap cap;
2103         void **txq;
2104         int i;
2105         int count;
2106         int ret;
2107
2108         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2109         dev = &rte_eth_devices[port_id];
2110         if (tx_queue_id >= dev->data->nb_tx_queues) {
2111                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
2112                 return -EINVAL;
2113         }
2114         ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2115         if (ret != 0)
2116                 return ret;
2117         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup,
2118                                 -ENOTSUP);
2119         /* If nb_tx_desc is zero, use the driver's max descriptor count. */
2120         if (nb_tx_desc == 0)
2121                 nb_tx_desc = cap.max_nb_desc;
2122         if (nb_tx_desc > cap.max_nb_desc) {
2123                 RTE_ETHDEV_LOG(ERR,
2124                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu\n",
2125                         nb_tx_desc, cap.max_nb_desc);
2126                 return -EINVAL;
2127         }
2128         if (conf->peer_count > cap.max_tx_2_rx) {
2129                 RTE_ETHDEV_LOG(ERR,
2130                         "Invalid value for number of peers for Tx queue(=%hu), should be: <= %hu\n",
2131                         conf->peer_count, cap.max_tx_2_rx);
2132                 return -EINVAL;
2133         }
2134         if (conf->peer_count == 0) {
2135                 RTE_ETHDEV_LOG(ERR,
2136                         "Invalid value for number of peers for Tx queue(=%hu), should be: > 0\n",
2137                         conf->peer_count);
2138                 return -EINVAL;
2139         }
2140         for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
2141              cap.max_nb_queues != UINT16_MAX; i++) {
2142                 if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
2143                         count++;
2144         }
2145         if (count > cap.max_nb_queues) {
2146                 RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues, max is %d\n",
2147                         cap.max_nb_queues);
2148                 return -EINVAL;
2149         }
2150         if (dev->data->dev_started)
2151                 return -EBUSY;
2152         txq = dev->data->tx_queues;
2153         if (txq[tx_queue_id] != NULL) {
2154                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
2155                                         -ENOTSUP);
2156                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
2157                 txq[tx_queue_id] = NULL;
2158         }
2159         ret = (*dev->dev_ops->tx_hairpin_queue_setup)
2160                 (dev, tx_queue_id, nb_tx_desc, conf);
2161         if (ret == 0)
2162                 dev->data->tx_queue_state[tx_queue_id] =
2163                         RTE_ETH_QUEUE_STATE_HAIRPIN;
2164         return eth_err(port_id, ret);
2165 }
2166
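/*
 * Illustrative sketch (not part of this library; names are hypothetical):
 * bind an Rx hairpin queue to a Tx hairpin queue on the same port so that
 * matching traffic loops through the NIC without reaching the host. Both
 * queue ids must be below the counts passed to rte_eth_dev_configure(),
 * with regular queues conventionally occupying the lower indices; a
 * descriptor count of 0 selects cap.max_nb_desc, as implemented above.
 */
static __rte_unused int
example_setup_hairpin_pair(uint16_t port_id, uint16_t rxq, uint16_t txq)
{
        struct rte_eth_hairpin_conf conf;
        int ret;

        memset(&conf, 0, sizeof(conf));
        conf.peer_count = 1;

        /* The Rx queue's peer is the Tx queue, and vice versa. */
        conf.peers[0].port = port_id;
        conf.peers[0].queue = txq;
        ret = rte_eth_rx_hairpin_queue_setup(port_id, rxq, 0, &conf);
        if (ret != 0)
                return ret;

        conf.peers[0].queue = rxq;
        return rte_eth_tx_hairpin_queue_setup(port_id, txq, 0, &conf);
}
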
2167 void
2168 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
2169                 void *userdata __rte_unused)
2170 {
2171         rte_pktmbuf_free_bulk(pkts, unsent);
2172 }
2173
2174 void
2175 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
2176                 void *userdata)
2177 {
2178         uint64_t *count = userdata;
2179
2180         rte_pktmbuf_free_bulk(pkts, unsent);
2181         *count += unsent;
2182 }
2183
2184 int
2185 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
2186                 buffer_tx_error_fn cbfn, void *userdata)
2187 {
2188         buffer->error_callback = cbfn;
2189         buffer->error_userdata = userdata;
2190         return 0;
2191 }
2192
2193 int
2194 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
2195 {
2196         int ret = 0;
2197
2198         if (buffer == NULL)
2199                 return -EINVAL;
2200
2201         buffer->size = size;
2202         if (buffer->error_callback == NULL) {
2203                 ret = rte_eth_tx_buffer_set_err_callback(
2204                         buffer, rte_eth_tx_buffer_drop_callback, NULL);
2205         }
2206
2207         return ret;
2208 }
2209
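/*
 * Illustrative sketch (not part of this library; names are hypothetical):
 * create a Tx buffer that counts dropped packets instead of freeing them
 * silently. The "dropped" counter must stay valid for the lifetime of the
 * buffer. Packets are queued with rte_eth_tx_buffer() and pushed out with
 * rte_eth_tx_buffer_flush().
 */
static __rte_unused struct rte_eth_dev_tx_buffer *
example_create_tx_buffer(uint64_t *dropped)
{
        struct rte_eth_dev_tx_buffer *buffer;

        buffer = rte_zmalloc("example_tx_buffer",
                        RTE_ETH_TX_BUFFER_SIZE(32), 0);
        if (buffer == NULL)
                return NULL;

        rte_eth_tx_buffer_init(buffer, 32);
        rte_eth_tx_buffer_set_err_callback(buffer,
                        rte_eth_tx_buffer_count_callback, dropped);
        return buffer;
}
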
2210 int
2211 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
2212 {
2213         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2214         int ret;
2215
2216         /* Validate input data; bail if not valid or not supported. */
2217         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2218         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
2219
2220         /* Call driver to free pending mbufs. */
2221         ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
2222                                                free_cnt);
2223         return eth_err(port_id, ret);
2224 }
2225
2226 int
2227 rte_eth_promiscuous_enable(uint16_t port_id)
2228 {
2229         struct rte_eth_dev *dev;
2230         int diag = 0;
2231
2232         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2233         dev = &rte_eth_devices[port_id];
2234
2235         if (dev->data->promiscuous == 1)
2236                 return 0;
2237
2238         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP);
2239
2240         diag = (*dev->dev_ops->promiscuous_enable)(dev);
2241         dev->data->promiscuous = (diag == 0) ? 1 : 0;
2242
2243         return eth_err(port_id, diag);
2244 }
2245
2246 int
2247 rte_eth_promiscuous_disable(uint16_t port_id)
2248 {
2249         struct rte_eth_dev *dev;
2250         int diag = 0;
2251
2252         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2253         dev = &rte_eth_devices[port_id];
2254
2255         if (dev->data->promiscuous == 0)
2256                 return 0;
2257
2258         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP);
2259
2260         dev->data->promiscuous = 0;
2261         diag = (*dev->dev_ops->promiscuous_disable)(dev);
2262         if (diag != 0)
2263                 dev->data->promiscuous = 1;
2264
2265         return eth_err(port_id, diag);
2266 }
2267
2268 int
2269 rte_eth_promiscuous_get(uint16_t port_id)
2270 {
2271         struct rte_eth_dev *dev;
2272
2273         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2274
2275         dev = &rte_eth_devices[port_id];
2276         return dev->data->promiscuous;
2277 }
2278
2279 int
2280 rte_eth_allmulticast_enable(uint16_t port_id)
2281 {
2282         struct rte_eth_dev *dev;
2283         int diag;
2284
2285         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2286         dev = &rte_eth_devices[port_id];
2287
2288         if (dev->data->all_multicast == 1)
2289                 return 0;
2290
2291         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP);
2292         diag = (*dev->dev_ops->allmulticast_enable)(dev);
2293         dev->data->all_multicast = (diag == 0) ? 1 : 0;
2294
2295         return eth_err(port_id, diag);
2296 }
2297
2298 int
2299 rte_eth_allmulticast_disable(uint16_t port_id)
2300 {
2301         struct rte_eth_dev *dev;
2302         int diag;
2303
2304         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2305         dev = &rte_eth_devices[port_id];
2306
2307         if (dev->data->all_multicast == 0)
2308                 return 0;
2309
2310         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP);
2311         dev->data->all_multicast = 0;
2312         diag = (*dev->dev_ops->allmulticast_disable)(dev);
2313         if (diag != 0)
2314                 dev->data->all_multicast = 1;
2315
2316         return eth_err(port_id, diag);
2317 }
2318
2319 int
2320 rte_eth_allmulticast_get(uint16_t port_id)
2321 {
2322         struct rte_eth_dev *dev;
2323
2324         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2325
2326         dev = &rte_eth_devices[port_id];
2327         return dev->data->all_multicast;
2328 }
2329
2330 int
2331 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
2332 {
2333         struct rte_eth_dev *dev;
2334
2335         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2336         dev = &rte_eth_devices[port_id];
2337
2338         if (dev->data->dev_conf.intr_conf.lsc &&
2339             dev->data->dev_started)
2340                 rte_eth_linkstatus_get(dev, eth_link);
2341         else {
2342                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2343                 (*dev->dev_ops->link_update)(dev, 1);
2344                 *eth_link = dev->data->dev_link;
2345         }
2346
2347         return 0;
2348 }
2349
2350 int
2351 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
2352 {
2353         struct rte_eth_dev *dev;
2354
2355         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2356         dev = &rte_eth_devices[port_id];
2357
2358         if (dev->data->dev_conf.intr_conf.lsc &&
2359             dev->data->dev_started)
2360                 rte_eth_linkstatus_get(dev, eth_link);
2361         else {
2362                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2363                 (*dev->dev_ops->link_update)(dev, 0);
2364                 *eth_link = dev->data->dev_link;
2365         }
2366
2367         return 0;
2368 }
2369
2370 const char *
2371 rte_eth_link_speed_to_str(uint32_t link_speed)
2372 {
2373         switch (link_speed) {
2374         case ETH_SPEED_NUM_NONE: return "None";
2375         case ETH_SPEED_NUM_10M:  return "10 Mbps";
2376         case ETH_SPEED_NUM_100M: return "100 Mbps";
2377         case ETH_SPEED_NUM_1G:   return "1 Gbps";
2378         case ETH_SPEED_NUM_2_5G: return "2.5 Gbps";
2379         case ETH_SPEED_NUM_5G:   return "5 Gbps";
2380         case ETH_SPEED_NUM_10G:  return "10 Gbps";
2381         case ETH_SPEED_NUM_20G:  return "20 Gbps";
2382         case ETH_SPEED_NUM_25G:  return "25 Gbps";
2383         case ETH_SPEED_NUM_40G:  return "40 Gbps";
2384         case ETH_SPEED_NUM_50G:  return "50 Gbps";
2385         case ETH_SPEED_NUM_56G:  return "56 Gbps";
2386         case ETH_SPEED_NUM_100G: return "100 Gbps";
2387         case ETH_SPEED_NUM_200G: return "200 Gbps";
2388         case ETH_SPEED_NUM_UNKNOWN: return "Unknown";
2389         default: return "Invalid";
2390         }
2391 }
2392
2393 int
2394 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
2395 {
2396         if (eth_link->link_status == ETH_LINK_DOWN)
2397                 return snprintf(str, len, "Link down");
2398         else
2399                 return snprintf(str, len, "Link up at %s %s %s",
2400                         rte_eth_link_speed_to_str(eth_link->link_speed),
2401                         (eth_link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
2402                         "FDX" : "HDX",
2403                         (eth_link->link_autoneg == ETH_LINK_AUTONEG) ?
2404                         "Autoneg" : "Fixed");
2405 }
2406
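/*
 * Illustrative sketch (not part of this library; the "example_" name is
 * hypothetical): print the current link state without blocking on link
 * negotiation.
 */
static __rte_unused void
example_print_link_status(uint16_t port_id)
{
        struct rte_eth_link link;
        char status[RTE_ETH_LINK_MAX_STR_LEN];

        if (rte_eth_link_get_nowait(port_id, &link) < 0)
                return;

        rte_eth_link_to_str(status, sizeof(status), &link);
        printf("Port %u: %s\n", port_id, status);
}
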
2407 int
2408 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
2409 {
2410         struct rte_eth_dev *dev;
2411
2412         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2413
2414         dev = &rte_eth_devices[port_id];
2415         memset(stats, 0, sizeof(*stats));
2416
2417         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
2418         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
2419         return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
2420 }
2421
2422 int
2423 rte_eth_stats_reset(uint16_t port_id)
2424 {
2425         struct rte_eth_dev *dev;
2426         int ret;
2427
2428         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2429         dev = &rte_eth_devices[port_id];
2430
2431         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
2432         ret = (*dev->dev_ops->stats_reset)(dev);
2433         if (ret != 0)
2434                 return eth_err(port_id, ret);
2435
2436         dev->data->rx_mbuf_alloc_failed = 0;
2437
2438         return 0;
2439 }
2440
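/*
 * Illustrative sketch (not part of this library; the "example_" name is
 * hypothetical): sample and clear the basic counters; rx_nombuf reports
 * mbuf allocation failures, as filled in by rte_eth_stats_get().
 */
static __rte_unused void
example_dump_basic_stats(uint16_t port_id)
{
        struct rte_eth_stats stats;

        if (rte_eth_stats_get(port_id, &stats) != 0)
                return;

        printf("port %u: %"PRIu64" rx, %"PRIu64" tx, %"PRIu64" rx-nombuf\n",
                port_id, stats.ipackets, stats.opackets, stats.rx_nombuf);

        /* Start the next measurement interval from zero. */
        rte_eth_stats_reset(port_id);
}
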
2441 static inline int
2442 get_xstats_basic_count(struct rte_eth_dev *dev)
2443 {
2444         uint16_t nb_rxqs, nb_txqs;
2445         int count;
2446
2447         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2448         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2449
2450         count = RTE_NB_STATS;
2451         count += nb_rxqs * RTE_NB_RXQ_STATS;
2452         count += nb_txqs * RTE_NB_TXQ_STATS;
2453
2454         return count;
2455 }
2456
2457 static int
2458 get_xstats_count(uint16_t port_id)
2459 {
2460         struct rte_eth_dev *dev;
2461         int count;
2462
2463         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2464         dev = &rte_eth_devices[port_id];
2465         if (dev->dev_ops->xstats_get_names_by_id != NULL) {
2466                 count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
2467                                 NULL, 0);
2468                 if (count < 0)
2469                         return eth_err(port_id, count);
2470         }
2471         if (dev->dev_ops->xstats_get_names != NULL) {
2472                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
2473                 if (count < 0)
2474                         return eth_err(port_id, count);
2475         } else
2476                 count = 0;
2477
2478
2479         count += get_xstats_basic_count(dev);
2480
2481         return count;
2482 }
2483
2484 int
2485 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
2486                 uint64_t *id)
2487 {
2488         int cnt_xstats, idx_xstat;
2489
2490         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2491
2492         if (!id) {
2493                 RTE_ETHDEV_LOG(ERR, "Id pointer is NULL\n");
2494                 return -ENOMEM;
2495         }
2496
2497         if (!xstat_name) {
2498                 RTE_ETHDEV_LOG(ERR, "xstat_name pointer is NULL\n");
2499                 return -ENOMEM;
2500         }
2501
2502         /* Get count */
2503         cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
2504         if (cnt_xstats < 0) {
2505                 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n");
2506                 return -ENODEV;
2507         }
2508
2509         /* Get id-name lookup table */
2510         struct rte_eth_xstat_name xstats_names[cnt_xstats];
2511
2512         if (cnt_xstats != rte_eth_xstats_get_names_by_id(
2513                         port_id, xstats_names, cnt_xstats, NULL)) {
2514                 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n");
2515                 return -1;
2516         }
2517
2518         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
2519                 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
2520                         *id = idx_xstat;
2521                         return 0;
2522                 }
2523         }
2524
2525         return -EINVAL;
2526 }
2527
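/*
 * Illustrative sketch (not part of this library; the "example_" name is
 * hypothetical): resolve one extended statistic by name, then fetch just
 * that value by id, e.g. example_get_xstat(port, "rx_good_packets", &v).
 */
static __rte_unused int
example_get_xstat(uint16_t port_id, const char *name, uint64_t *value)
{
        uint64_t id;
        int ret;

        ret = rte_eth_xstats_get_id_by_name(port_id, name, &id);
        if (ret != 0)
                return ret;

        ret = rte_eth_xstats_get_by_id(port_id, &id, value, 1);
        return ret < 0 ? ret : 0;
}
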
2528 /* retrieve basic stats names */
2529 static int
2530 rte_eth_basic_stats_get_names(struct rte_eth_dev *dev,
2531         struct rte_eth_xstat_name *xstats_names)
2532 {
2533         int cnt_used_entries = 0;
2534         uint32_t idx, id_queue;
2535         uint16_t num_q;
2536
2537         for (idx = 0; idx < RTE_NB_STATS; idx++) {
2538                 strlcpy(xstats_names[cnt_used_entries].name,
2539                         rte_stats_strings[idx].name,
2540                         sizeof(xstats_names[0].name));
2541                 cnt_used_entries++;
2542         }
2543         num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2544         for (id_queue = 0; id_queue < num_q; id_queue++) {
2545                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
2546                         snprintf(xstats_names[cnt_used_entries].name,
2547                                 sizeof(xstats_names[0].name),
2548                                 "rx_q%u%s",
2549                                 id_queue, rte_rxq_stats_strings[idx].name);
2550                         cnt_used_entries++;
2551                 }
2552
2553         }
2554         num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2555         for (id_queue = 0; id_queue < num_q; id_queue++) {
2556                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
2557                         snprintf(xstats_names[cnt_used_entries].name,
2558                                 sizeof(xstats_names[0].name),
2559                                 "tx_q%u%s",
2560                                 id_queue, rte_txq_stats_strings[idx].name);
2561                         cnt_used_entries++;
2562                 }
2563         }
2564         return cnt_used_entries;
2565 }
2566
2567 /* retrieve ethdev extended statistics names */
2568 int
2569 rte_eth_xstats_get_names_by_id(uint16_t port_id,
2570         struct rte_eth_xstat_name *xstats_names, unsigned int size,
2571         uint64_t *ids)
2572 {
2573         struct rte_eth_xstat_name *xstats_names_copy;
2574         unsigned int no_basic_stat_requested = 1;
2575         unsigned int no_ext_stat_requested = 1;
2576         unsigned int expected_entries;
2577         unsigned int basic_count;
2578         struct rte_eth_dev *dev;
2579         unsigned int i;
2580         int ret;
2581
2582         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2583         dev = &rte_eth_devices[port_id];
2584
2585         basic_count = get_xstats_basic_count(dev);
2586         ret = get_xstats_count(port_id);
2587         if (ret < 0)
2588                 return ret;
2589         expected_entries = (unsigned int)ret;
2590
2591         /* Return max number of stats if no ids given */
2592         if (!ids) {
2593                 if (!xstats_names)
2594                         return expected_entries;
2595                 else if (xstats_names && size < expected_entries)
2596                         return expected_entries;
2597         }
2598
2599         if (ids && !xstats_names)
2600                 return -EINVAL;
2601
2602         if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
2603                 uint64_t ids_copy[size];
2604
2605                 for (i = 0; i < size; i++) {
2606                         if (ids[i] < basic_count) {
2607                                 no_basic_stat_requested = 0;
2608                                 break;
2609                         }
2610
2611                         /*
2612                          * Convert ids to xstats ids that PMD knows.
2613                          * ids known by user are basic + extended stats.
2614                          */
2615                         ids_copy[i] = ids[i] - basic_count;
2616                 }
2617
2618                 if (no_basic_stat_requested)
2619                         return (*dev->dev_ops->xstats_get_names_by_id)(dev,
2620                                         xstats_names, ids_copy, size);
2621         }
2622
2623         /* Retrieve all stats */
2624         if (!ids) {
2625                 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
2626                                 expected_entries);
2627                 if (num_stats < 0 || num_stats > (int)expected_entries)
2628                         return num_stats;
2629                 else
2630                         return expected_entries;
2631         }
2632
2633         xstats_names_copy = calloc(expected_entries,
2634                 sizeof(struct rte_eth_xstat_name));
2635
2636         if (!xstats_names_copy) {
2637                 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n");
2638                 return -ENOMEM;
2639         }
2640
2641         if (ids) {
2642                 for (i = 0; i < size; i++) {
2643                         if (ids[i] >= basic_count) {
2644                                 no_ext_stat_requested = 0;
2645                                 break;
2646                         }
2647                 }
2648         }
2649
2650         /* Fill xstats_names_copy structure */
2651         if (ids && no_ext_stat_requested) {
2652                 rte_eth_basic_stats_get_names(dev, xstats_names_copy);
2653         } else {
2654                 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
2655                         expected_entries);
2656                 if (ret < 0) {
2657                         free(xstats_names_copy);
2658                         return ret;
2659                 }
2660         }
2661
2662         /* Filter stats */
2663         for (i = 0; i < size; i++) {
2664                 if (ids[i] >= expected_entries) {
2665                         RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
2666                         free(xstats_names_copy);
2667                         return -1;
2668                 }
2669                 xstats_names[i] = xstats_names_copy[ids[i]];
2670         }
2671
2672         free(xstats_names_copy);
2673         return size;
2674 }
2675
2676 int
2677 rte_eth_xstats_get_names(uint16_t port_id,
2678         struct rte_eth_xstat_name *xstats_names,
2679         unsigned int size)
2680 {
2681         struct rte_eth_dev *dev;
2682         int cnt_used_entries;
2683         int cnt_expected_entries;
2684         int cnt_driver_entries;
2685
2686         cnt_expected_entries = get_xstats_count(port_id);
2687         if (xstats_names == NULL || cnt_expected_entries < 0 ||
2688                         (int)size < cnt_expected_entries)
2689                 return cnt_expected_entries;
2690
2691         /* port_id checked in get_xstats_count() */
2692         dev = &rte_eth_devices[port_id];
2693
2694         cnt_used_entries = rte_eth_basic_stats_get_names(
2695                 dev, xstats_names);
2696
2697         if (dev->dev_ops->xstats_get_names != NULL) {
2698                 /* If there are any driver-specific xstats, append them
2699                  * to end of list.
2700                  * to the end of the list.
2701                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
2702                         dev,
2703                         xstats_names + cnt_used_entries,
2704                         size - cnt_used_entries);
2705                 if (cnt_driver_entries < 0)
2706                         return eth_err(port_id, cnt_driver_entries);
2707                 cnt_used_entries += cnt_driver_entries;
2708         }
2709
2710         return cnt_used_entries;
2711 }
2712
2713
2714 static int
2715 rte_eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
2716 {
2717         struct rte_eth_dev *dev;
2718         struct rte_eth_stats eth_stats;
2719         unsigned int count = 0, i, q;
2720         uint64_t val, *stats_ptr;
2721         uint16_t nb_rxqs, nb_txqs;
2722         int ret;
2723
2724         ret = rte_eth_stats_get(port_id, &eth_stats);
2725         if (ret < 0)
2726                 return ret;
2727
2728         dev = &rte_eth_devices[port_id];
2729
2730         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2731         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2732
2733         /* global stats */
2734         for (i = 0; i < RTE_NB_STATS; i++) {
2735                 stats_ptr = RTE_PTR_ADD(&eth_stats,
2736                                         rte_stats_strings[i].offset);
2737                 val = *stats_ptr;
2738                 xstats[count++].value = val;
2739         }
2740
2741         /* per-rxq stats */
2742         for (q = 0; q < nb_rxqs; q++) {
2743                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
2744                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2745                                         rte_rxq_stats_strings[i].offset +
2746                                         q * sizeof(uint64_t));
2747                         val = *stats_ptr;
2748                         xstats[count++].value = val;
2749                 }
2750         }
2751
2752         /* per-txq stats */
2753         for (q = 0; q < nb_txqs; q++) {
2754                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
2755                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2756                                         rte_txq_stats_strings[i].offset +
2757                                         q * sizeof(uint64_t));
2758                         val = *stats_ptr;
2759                         xstats[count++].value = val;
2760                 }
2761         }
2762         return count;
2763 }
2764
2765 /* retrieve ethdev extended statistics */
2766 int
2767 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
2768                          uint64_t *values, unsigned int size)
2769 {
2770         unsigned int no_basic_stat_requested = 1;
2771         unsigned int no_ext_stat_requested = 1;
2772         unsigned int num_xstats_filled;
2773         unsigned int basic_count;
2774         uint16_t expected_entries;
2775         struct rte_eth_dev *dev;
2776         unsigned int i;
2777         int ret;
2778
2779         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2780         ret = get_xstats_count(port_id);
2781         if (ret < 0)
2782                 return ret;
2783         expected_entries = (uint16_t)ret;
2784         struct rte_eth_xstat xstats[expected_entries];
2785         dev = &rte_eth_devices[port_id];
2786         basic_count = get_xstats_basic_count(dev);
2787
2788         /* Return max number of stats if no ids given */
2789         if (!ids) {
2790                 if (!values)
2791                         return expected_entries;
2792                 else if (values && size < expected_entries)
2793                         return expected_entries;
2794         }
2795
2796         if (ids && !values)
2797                 return -EINVAL;
2798
2799         if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
2801                 uint64_t ids_copy[size];
2802
2803                 for (i = 0; i < size; i++) {
2804                         if (ids[i] < basic_count) {
2805                                 no_basic_stat_requested = 0;
2806                                 break;
2807                         }
2808
2809                         /*
2810                          * Convert ids to xstats ids that PMD knows.
2811                          * ids known by user are basic + extended stats.
2812                          */
2813                         ids_copy[i] = ids[i] - basic_count;
2814                 }
2815
2816                 if (no_basic_stat_requested)
2817                         return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
2818                                         values, size);
2819         }
2820
2821         if (ids) {
2822                 for (i = 0; i < size; i++) {
2823                         if (ids[i] >= basic_count) {
2824                                 no_ext_stat_requested = 0;
2825                                 break;
2826                         }
2827                 }
2828         }
2829
2830         /* Fill the xstats structure */
2831         if (ids && no_ext_stat_requested)
2832                 ret = rte_eth_basic_stats_get(port_id, xstats);
2833         else
2834                 ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
2835
2836         if (ret < 0)
2837                 return ret;
2838         num_xstats_filled = (unsigned int)ret;
2839
2840         /* Return all stats */
2841         if (!ids) {
2842                 for (i = 0; i < num_xstats_filled; i++)
2843                         values[i] = xstats[i].value;
2844                 return expected_entries;
2845         }
2846
2847         /* Filter stats */
2848         for (i = 0; i < size; i++) {
2849                 if (ids[i] >= expected_entries) {
2850                         RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
2851                         return -1;
2852                 }
2853                 values[i] = xstats[ids[i]].value;
2854         }
2855         return size;
2856 }
2857
2858 int
2859 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
2860         unsigned int n)
2861 {
2862         struct rte_eth_dev *dev;
2863         unsigned int count = 0, i;
2864         signed int xcount = 0;
2865         uint16_t nb_rxqs, nb_txqs;
2866         int ret;
2867
2868         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2869
2870         dev = &rte_eth_devices[port_id];
2871
2872         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2873         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2874
2875         /* Return generic statistics */
2876         count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
2877                 (nb_txqs * RTE_NB_TXQ_STATS);
2878
2879         /* implemented by the driver */
2880         if (dev->dev_ops->xstats_get != NULL) {
2881                 /* Retrieve the xstats from the driver at the end of the
2882                  * xstats array.
2883                  */
2884                 xcount = (*dev->dev_ops->xstats_get)(dev,
2885                                      xstats ? xstats + count : NULL,
2886                                      (n > count) ? n - count : 0);
2887
2888                 if (xcount < 0)
2889                         return eth_err(port_id, xcount);
2890         }
2891
2892         if (n < count + xcount || xstats == NULL)
2893                 return count + xcount;
2894
2895         /* now fill the xstats structure */
2896         ret = rte_eth_basic_stats_get(port_id, xstats);
2897         if (ret < 0)
2898                 return ret;
2899         count = ret;
2900
2901         for (i = 0; i < count; i++)
2902                 xstats[i].id = i;
2903         /* add an offset to driver-specific stats */
2904         for ( ; i < count + xcount; i++)
2905                 xstats[i].id += count;
2906
2907         return count + xcount;
2908 }
2909
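/*
 * Illustrative sketch (not part of this library; the "example_" name is
 * hypothetical): the usual two-call pattern: query the required size with
 * a NULL buffer, allocate, then fetch names and values. Each value's id
 * indexes into the name table.
 */
static __rte_unused void
example_dump_xstats(uint16_t port_id)
{
        struct rte_eth_xstat_name *names = NULL;
        struct rte_eth_xstat *xstats = NULL;
        int i, count;

        count = rte_eth_xstats_get(port_id, NULL, 0);
        if (count <= 0)
                return;

        xstats = malloc(sizeof(*xstats) * count);
        names = malloc(sizeof(*names) * count);
        if (xstats == NULL || names == NULL)
                goto out;

        if (rte_eth_xstats_get(port_id, xstats, count) != count ||
            rte_eth_xstats_get_names(port_id, names, count) != count)
                goto out;

        for (i = 0; i < count; i++)
                printf("%s: %"PRIu64"\n",
                        names[xstats[i].id].name, xstats[i].value);
out:
        free(xstats);
        free(names);
}
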
2910 /* reset ethdev extended statistics */
2911 int
2912 rte_eth_xstats_reset(uint16_t port_id)
2913 {
2914         struct rte_eth_dev *dev;
2915
2916         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2917         dev = &rte_eth_devices[port_id];
2918
2919         /* implemented by the driver */
2920         if (dev->dev_ops->xstats_reset != NULL)
2921                 return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev));
2922
2923         /* fallback to default */
2924         return rte_eth_stats_reset(port_id);
2925 }
2926
2927 static int
2928 set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx,
2929                 uint8_t is_rx)
2930 {
2931         struct rte_eth_dev *dev;
2932
2933         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2934
2935         dev = &rte_eth_devices[port_id];
2936
2937         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
2938
2939         if (is_rx && (queue_id >= dev->data->nb_rx_queues))
2940                 return -EINVAL;
2941
2942         if (!is_rx && (queue_id >= dev->data->nb_tx_queues))
2943                 return -EINVAL;
2944
2945         if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
2946                 return -EINVAL;
2947
2948         return (*dev->dev_ops->queue_stats_mapping_set)
2949                         (dev, queue_id, stat_idx, is_rx);
2950 }
2951
2952
2953 int
2954 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
2955                 uint8_t stat_idx)
2956 {
2957         return eth_err(port_id, set_queue_stats_mapping(port_id, tx_queue_id,
2958                                                 stat_idx, STAT_QMAP_TX));
2959 }
2960
2961
2962 int
2963 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
2964                 uint8_t stat_idx)
2965 {
2966         return eth_err(port_id, set_queue_stats_mapping(port_id, rx_queue_id,
2967                                                 stat_idx, STAT_QMAP_RX));
2968 }
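
/*
 * Illustrative sketch (hypothetical example_* helper): map the first Tx
 * queues 1:1 onto the per-queue stat counters. PMDs without a
 * queue_stats_mapping_set op make the call return -ENOTSUP, which is
 * simply propagated here.
 */
static __rte_unused int
example_map_txq_stats(uint16_t port_id, uint16_t nb_txq)
{
        uint16_t q;
        int ret;

        for (q = 0; q < nb_txq && q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++) {
                ret = rte_eth_dev_set_tx_queue_stats_mapping(port_id, q,
                                (uint8_t)q);
                if (ret != 0)
                        return ret;
        }

        return 0;
}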
2969
2970 int
2971 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
2972 {
2973         struct rte_eth_dev *dev;
2974
2975         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2976         dev = &rte_eth_devices[port_id];
2977
2978         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
2979         return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
2980                                                         fw_version, fw_size));
2981 }
2982
2983 int
2984 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
2985 {
2986         struct rte_eth_dev *dev;
2987         const struct rte_eth_desc_lim lim = {
2988                 .nb_max = UINT16_MAX,
2989                 .nb_min = 0,
2990                 .nb_align = 1,
2991                 .nb_seg_max = UINT16_MAX,
2992                 .nb_mtu_seg_max = UINT16_MAX,
2993         };
2994         int diag;
2995
2996         /*
2997          * Init dev_info before port_id check since the caller may ignore
2998          * the return status and would otherwise read uninitialized data.
2999          */
3000         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3001         dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
3002
3003         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3004         dev = &rte_eth_devices[port_id];
3005
3006         dev_info->rx_desc_lim = lim;
3007         dev_info->tx_desc_lim = lim;
3008         dev_info->device = dev->device;
3009         dev_info->min_mtu = RTE_ETHER_MIN_MTU;
3010         dev_info->max_mtu = UINT16_MAX;
3011
3012         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
3013         diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info);
3014         if (diag != 0) {
3015                 /* Cleanup already filled in device information */
3016                 memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3017                 return eth_err(port_id, diag);
3018         }
3019
3020         /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */
3021         dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues,
3022                         RTE_MAX_QUEUES_PER_PORT);
3023         dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues,
3024                         RTE_MAX_QUEUES_PER_PORT);
3025
3026         dev_info->driver_name = dev->device->driver->name;
3027         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
3028         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
3029
3030         dev_info->dev_flags = &dev->data->dev_flags;
3031
3032         return 0;
3033 }
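
/*
 * Illustrative sketch (hypothetical example_* helper): clamp a requested
 * Rx queue count to the device capabilities. dev_info is zeroed on failure
 * above, so the return status must be checked before trusting the fields.
 */
static __rte_unused int
example_clamp_rx_queues(uint16_t port_id, uint16_t requested)
{
        struct rte_eth_dev_info dev_info;
        int ret;

        ret = rte_eth_dev_info_get(port_id, &dev_info);
        if (ret != 0)
                return ret;

        return RTE_MIN(requested, dev_info.max_rx_queues);
}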
3034
3035 int
3036 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3037                                  uint32_t *ptypes, int num)
3038 {
3039         int i, j;
3040         struct rte_eth_dev *dev;
3041         const uint32_t *all_ptypes;
3042
3043         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3044         dev = &rte_eth_devices[port_id];
3045         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
3046         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3047
3048         if (!all_ptypes)
3049                 return 0;
3050
3051         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
3052                 if (all_ptypes[i] & ptype_mask) {
3053                         if (j < num)
3054                                 ptypes[j] = all_ptypes[i];
3055                         j++;
3056                 }
3057
3058         return j;
3059 }
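
/*
 * Illustrative sketch (hypothetical example_* helper): list the L4 packet
 * types a port can recognize. A return value larger than RTE_DIM(ptypes)
 * means the array was too small and only the first entries were filled.
 */
static __rte_unused void
example_list_l4_ptypes(uint16_t port_id)
{
        uint32_t ptypes[32];
        int i, n;

        n = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_L4_MASK,
                        ptypes, RTE_DIM(ptypes));
        for (i = 0; i < n && i < (int)RTE_DIM(ptypes); i++)
                printf("ptype 0x%08" PRIx32 "\n", ptypes[i]);
}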
3060
3061 int
3062 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3063                                  uint32_t *set_ptypes, unsigned int num)
3064 {
3065         const uint32_t valid_ptype_masks[] = {
3066                 RTE_PTYPE_L2_MASK,
3067                 RTE_PTYPE_L3_MASK,
3068                 RTE_PTYPE_L4_MASK,
3069                 RTE_PTYPE_TUNNEL_MASK,
3070                 RTE_PTYPE_INNER_L2_MASK,
3071                 RTE_PTYPE_INNER_L3_MASK,
3072                 RTE_PTYPE_INNER_L4_MASK,
3073         };
3074         const uint32_t *all_ptypes;
3075         struct rte_eth_dev *dev;
3076         uint32_t unused_mask;
3077         unsigned int i, j;
3078         int ret;
3079
3080         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3081         dev = &rte_eth_devices[port_id];
3082
3083         if (num > 0 && set_ptypes == NULL)
3084                 return -EINVAL;
3085
3086         if (*dev->dev_ops->dev_supported_ptypes_get == NULL ||
3087                         *dev->dev_ops->dev_ptypes_set == NULL) {
3088                 ret = 0;
3089                 goto ptype_unknown;
3090         }
3091
3092         if (ptype_mask == 0) {
3093                 ret = (*dev->dev_ops->dev_ptypes_set)(dev,
3094                                 ptype_mask);
3095                 goto ptype_unknown;
3096         }
3097
3098         unused_mask = ptype_mask;
3099         for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) {
3100                 uint32_t mask = ptype_mask & valid_ptype_masks[i];
3101                 if (mask && mask != valid_ptype_masks[i]) {
3102                         ret = -EINVAL;
3103                         goto ptype_unknown;
3104                 }
3105                 unused_mask &= ~valid_ptype_masks[i];
3106         }
3107
3108         if (unused_mask) {
3109                 ret = -EINVAL;
3110                 goto ptype_unknown;
3111         }
3112
3113         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3114         if (all_ptypes == NULL) {
3115                 ret = 0;
3116                 goto ptype_unknown;
3117         }
3118
3119         /*
3120          * Accommodate as many set_ptypes as possible. If the supplied
3121          * set_ptypes array is too small, fill it partially.
3122          */
3123         for (i = 0, j = 0; set_ptypes != NULL &&
3124                                 (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) {
3125                 if (ptype_mask & all_ptypes[i]) {
3126                         if (j < num - 1) {
3127                                 set_ptypes[j] = all_ptypes[i];
3128                                 j++;
3129                                 continue;
3130                         }
3131                         break;
3132                 }
3133         }
3134
3135         if (set_ptypes != NULL && j < num)
3136                 set_ptypes[j] = RTE_PTYPE_UNKNOWN;
3137
3138         return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask);
3139
3140 ptype_unknown:
3141         if (num > 0)
3142                 set_ptypes[0] = RTE_PTYPE_UNKNOWN;
3143
3144         return ret;
3145 }
3146
3147 int
3148 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
3149 {
3150         struct rte_eth_dev *dev;
3151
3152         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3153         dev = &rte_eth_devices[port_id];
3154         rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
3155
3156         return 0;
3157 }
3158
3159 int
3160 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
3161 {
3162         struct rte_eth_dev *dev;
3163
3164         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3165
3166         dev = &rte_eth_devices[port_id];
3167         *mtu = dev->data->mtu;
3168         return 0;
3169 }
3170
3171 int
3172 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
3173 {
3174         int ret;
3175         struct rte_eth_dev_info dev_info;
3176         struct rte_eth_dev *dev;
3177
3178         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3179         dev = &rte_eth_devices[port_id];
3180         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
3181
3182         /*
3183          * Check if the device supports dev_infos_get; if it does not,
3184          * skip the min_mtu/max_mtu validation here, as it requires values
3185          * populated by rte_eth_dev_info_get(), which in turn relies on
3186          * dev->dev_ops->dev_infos_get.
3187          */
3188         if (*dev->dev_ops->dev_infos_get != NULL) {
3189                 ret = rte_eth_dev_info_get(port_id, &dev_info);
3190                 if (ret != 0)
3191                         return ret;
3192
3193                 if (mtu < dev_info.min_mtu || mtu > dev_info.max_mtu)
3194                         return -EINVAL;
3195         }
3196
3197         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
3198         if (!ret)
3199                 dev->data->mtu = mtu;
3200
3201         return eth_err(port_id, ret);
3202 }
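
/*
 * Illustrative sketch (hypothetical example_* helper): raise the MTU for
 * jumbo frames and read it back. The [min_mtu, max_mtu] range check above
 * rejects the value with -EINVAL when the PMD reports narrower limits.
 */
static __rte_unused int
example_enable_jumbo(uint16_t port_id)
{
        uint16_t mtu = 0;
        int ret;

        ret = rte_eth_dev_set_mtu(port_id, 9000);
        if (ret != 0)
                return ret;

        (void)rte_eth_dev_get_mtu(port_id, &mtu);
        return mtu == 9000 ? 0 : -EIO;
}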
3203
3204 int
3205 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
3206 {
3207         struct rte_eth_dev *dev;
3208         int ret;
3209
3210         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3211         dev = &rte_eth_devices[port_id];
3212         if (!(dev->data->dev_conf.rxmode.offloads &
3213               DEV_RX_OFFLOAD_VLAN_FILTER)) {
3214                 RTE_ETHDEV_LOG(ERR, "Port %u: vlan-filtering disabled\n",
3215                         port_id);
3216                 return -ENOSYS;
3217         }
3218
3219         if (vlan_id > 4095) {
3220                 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n",
3221                         port_id, vlan_id);
3222                 return -EINVAL;
3223         }
3224         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
3225
3226         ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
3227         if (ret == 0) {
3228                 struct rte_vlan_filter_conf *vfc;
3229                 int vidx;
3230                 int vbit;
3231
3232                 vfc = &dev->data->vlan_filter_conf;
3233                 vidx = vlan_id / 64;
3234                 vbit = vlan_id % 64;
3235
3236                 if (on)
3237                         vfc->ids[vidx] |= UINT64_C(1) << vbit;
3238                 else
3239                         vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
3240         }
3241
3242         return eth_err(port_id, ret);
3243 }
3244
3245 int
3246 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3247                                     int on)
3248 {
3249         struct rte_eth_dev *dev;
3250
3251         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3252         dev = &rte_eth_devices[port_id];
3253         if (rx_queue_id >= dev->data->nb_rx_queues) {
3254                 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id);
3255                 return -EINVAL;
3256         }
3257
3258         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
3259         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
3260
3261         return 0;
3262 }
3263
3264 int
3265 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
3266                                 enum rte_vlan_type vlan_type,
3267                                 uint16_t tpid)
3268 {
3269         struct rte_eth_dev *dev;
3270
3271         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3272         dev = &rte_eth_devices[port_id];
3273         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
3274
3275         return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
3276                                                                tpid));
3277 }
3278
3279 int
3280 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
3281 {
3282         struct rte_eth_dev_info dev_info;
3283         struct rte_eth_dev *dev;
3284         int ret = 0;
3285         int mask = 0;
3286         int cur, org = 0;
3287         uint64_t orig_offloads;
3288         uint64_t dev_offloads;
3289         uint64_t new_offloads;
3290
3291         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3292         dev = &rte_eth_devices[port_id];
3293
3294         /* save original values in case of failure */
3295         orig_offloads = dev->data->dev_conf.rxmode.offloads;
3296         dev_offloads = orig_offloads;
3297
3298         /* check which option changed by application */
3299         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
3300         org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
3301         if (cur != org) {
3302                 if (cur)
3303                         dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
3304                 else
3305                         dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
3306                 mask |= ETH_VLAN_STRIP_MASK;
3307         }
3308
3309         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
3310         org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
3311         if (cur != org) {
3312                 if (cur)
3313                         dev_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3314                 else
3315                         dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
3316                 mask |= ETH_VLAN_FILTER_MASK;
3317         }
3318
3319         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
3320         org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND);
3321         if (cur != org) {
3322                 if (cur)
3323                         dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
3324                 else
3325                         dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
3326                 mask |= ETH_VLAN_EXTEND_MASK;
3327         }
3328
3329         cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD);
3330         org = !!(dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP);
3331         if (cur != org) {
3332                 if (cur)
3333                         dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
3334                 else
3335                         dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
3336                 mask |= ETH_QINQ_STRIP_MASK;
3337         }
3338
3339         /* no change */
3340         if (mask == 0)
3341                 return ret;
3342
3343         ret = rte_eth_dev_info_get(port_id, &dev_info);
3344         if (ret != 0)
3345                 return ret;
3346
3347         /* Rx VLAN offloading must be within its device capabilities */
3348         if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) {
3349                 new_offloads = dev_offloads & ~orig_offloads;
3350                 RTE_ETHDEV_LOG(ERR,
3351                         "Ethdev port_id=%u newly added VLAN offloads "
3352                         "0x%" PRIx64 " must be within Rx offloads capabilities "
3353                         "0x%" PRIx64 " in %s()\n",
3354                         port_id, new_offloads, dev_info.rx_offload_capa,
3355                         __func__);
3356                 return -EINVAL;
3357         }
3358
3359         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
3360         dev->data->dev_conf.rxmode.offloads = dev_offloads;
3361         ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
3362         if (ret) {
3363                 /* hit an error, restore the original values */
3364                 dev->data->dev_conf.rxmode.offloads = orig_offloads;
3365         }
3366
3367         return eth_err(port_id, ret);
3368 }
3369
3370 int
3371 rte_eth_dev_get_vlan_offload(uint16_t port_id)
3372 {
3373         struct rte_eth_dev *dev;
3374         uint64_t *dev_offloads;
3375         int ret = 0;
3376
3377         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3378         dev = &rte_eth_devices[port_id];
3379         dev_offloads = &dev->data->dev_conf.rxmode.offloads;
3380
3381         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
3382                 ret |= ETH_VLAN_STRIP_OFFLOAD;
3383
3384         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
3385                 ret |= ETH_VLAN_FILTER_OFFLOAD;
3386
3387         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
3388                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
3389
3390         if (*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
3391                 ret |= ETH_QINQ_STRIP_OFFLOAD;
3392
3393         return ret;
3394 }
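
/*
 * Illustrative sketch (hypothetical example_* helper): enable VLAN
 * filtering through the offload mask, then admit one VLAN id. The get/set
 * pair above leaves the other VLAN offload bits untouched.
 */
static __rte_unused int
example_admit_vlan(uint16_t port_id, uint16_t vlan_id)
{
        int mask, ret;

        mask = rte_eth_dev_get_vlan_offload(port_id);
        if (mask < 0)
                return mask;

        ret = rte_eth_dev_set_vlan_offload(port_id,
                        mask | ETH_VLAN_FILTER_OFFLOAD);
        if (ret != 0)
                return ret;

        return rte_eth_dev_vlan_filter(port_id, vlan_id, 1);
}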
3395
3396 int
3397 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
3398 {
3399         struct rte_eth_dev *dev;
3400
3401         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3402         dev = &rte_eth_devices[port_id];
3403         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
3404
3405         return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
3406 }
3407
3408 int
3409 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3410 {
3411         struct rte_eth_dev *dev;
3412
3413         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3414         dev = &rte_eth_devices[port_id];
3415         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
3416         memset(fc_conf, 0, sizeof(*fc_conf));
3417         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
3418 }
3419
3420 int
3421 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3422 {
3423         struct rte_eth_dev *dev;
3424
3425         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3426         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
3427                 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n");
3428                 return -EINVAL;
3429         }
3430
3431         dev = &rte_eth_devices[port_id];
3432         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
3433         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
3434 }
3435
3436 int
3437 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
3438                                    struct rte_eth_pfc_conf *pfc_conf)
3439 {
3440         struct rte_eth_dev *dev;
3441
3442         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3443         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
3444                 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
3445                 return -EINVAL;
3446         }
3447
3448         dev = &rte_eth_devices[port_id];
3449         /* High water, low water validation is device-specific */
3450         if (*dev->dev_ops->priority_flow_ctrl_set)
3451                 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
3452                                         (dev, pfc_conf));
3453         return -ENOTSUP;
3454 }
3455
3456 static int
3457 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
3458                         uint16_t reta_size)
3459 {
3460         uint16_t i, num;
3461
3462         if (!reta_conf)
3463                 return -EINVAL;
3464
3465         num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
3466         for (i = 0; i < num; i++) {
3467                 if (reta_conf[i].mask)
3468                         return 0;
3469         }
3470
3471         return -EINVAL;
3472 }
3473
3474 static int
3475 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
3476                          uint16_t reta_size,
3477                          uint16_t max_rxq)
3478 {
3479         uint16_t i, idx, shift;
3480
3481         if (!reta_conf)
3482                 return -EINVAL;
3483
3484         if (max_rxq == 0) {
3485                 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n");
3486                 return -EINVAL;
3487         }
3488
3489         for (i = 0; i < reta_size; i++) {
3490                 idx = i / RTE_RETA_GROUP_SIZE;
3491                 shift = i % RTE_RETA_GROUP_SIZE;
3492                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
3493                         (reta_conf[idx].reta[shift] >= max_rxq)) {
3494                         RTE_ETHDEV_LOG(ERR,
3495                                 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n",
3496                                 idx, shift,
3497                                 reta_conf[idx].reta[shift], max_rxq);
3498                         return -EINVAL;
3499                 }
3500         }
3501
3502         return 0;
3503 }
3504
3505 int
3506 rte_eth_dev_rss_reta_update(uint16_t port_id,
3507                             struct rte_eth_rss_reta_entry64 *reta_conf,
3508                             uint16_t reta_size)
3509 {
3510         struct rte_eth_dev *dev;
3511         int ret;
3512
3513         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3514         /* Check mask bits */
3515         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
3516         if (ret < 0)
3517                 return ret;
3518
3519         dev = &rte_eth_devices[port_id];
3520
3521         /* Check entry value */
3522         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
3523                                 dev->data->nb_rx_queues);
3524         if (ret < 0)
3525                 return ret;
3526
3527         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
3528         return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
3529                                                              reta_size));
3530 }
3531
3532 int
3533 rte_eth_dev_rss_reta_query(uint16_t port_id,
3534                            struct rte_eth_rss_reta_entry64 *reta_conf,
3535                            uint16_t reta_size)
3536 {
3537         struct rte_eth_dev *dev;
3538         int ret;
3539
3540         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3541
3542         /* Check mask bits */
3543         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
3544         if (ret < 0)
3545                 return ret;
3546
3547         dev = &rte_eth_devices[port_id];
3548         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
3549         return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
3550                                                             reta_size));
3551 }
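
/*
 * Illustrative sketch (hypothetical example_* helper): spread a
 * redirection table round-robin over nb_queues Rx queues. reta_size is
 * expected to come from dev_info.reta_size; sizes above 512 entries are
 * rejected here only because of the fixed local array.
 */
static __rte_unused int
example_reta_round_robin(uint16_t port_id, uint16_t reta_size,
                uint16_t nb_queues)
{
        struct rte_eth_rss_reta_entry64
                reta_conf[ETH_RSS_RETA_SIZE_512 / RTE_RETA_GROUP_SIZE];
        uint16_t i;

        if (nb_queues == 0 || reta_size > ETH_RSS_RETA_SIZE_512)
                return -EINVAL;

        memset(reta_conf, 0, sizeof(reta_conf));
        for (i = 0; i < reta_size; i++) {
                reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
                                UINT64_C(1) << (i % RTE_RETA_GROUP_SIZE);
                reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i %
                                RTE_RETA_GROUP_SIZE] = i % nb_queues;
        }

        return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
}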
3552
3553 int
3554 rte_eth_dev_rss_hash_update(uint16_t port_id,
3555                             struct rte_eth_rss_conf *rss_conf)
3556 {
3557         struct rte_eth_dev *dev;
3558         struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
3559         int ret;
3560
3561         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3562
3563         ret = rte_eth_dev_info_get(port_id, &dev_info);
3564         if (ret != 0)
3565                 return ret;
3566
3567         rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf);
3568
3569         dev = &rte_eth_devices[port_id];
3570         if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
3571             dev_info.flow_type_rss_offloads) {
3572                 RTE_ETHDEV_LOG(ERR,
3573                         "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
3574                         port_id, rss_conf->rss_hf,
3575                         dev_info.flow_type_rss_offloads);
3576                 return -EINVAL;
3577         }
3578         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
3579         return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
3580                                                                  rss_conf));
3581 }
3582
3583 int
3584 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
3585                               struct rte_eth_rss_conf *rss_conf)
3586 {
3587         struct rte_eth_dev *dev;
3588
3589         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3590         dev = &rte_eth_devices[port_id];
3591         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
3592         return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
3593                                                                    rss_conf));
3594 }
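
/*
 * Illustrative sketch (hypothetical example_* helper): restrict the RSS
 * hash to IP addresses only, leaving the hash key unchanged (a NULL
 * rss_key tells the PMD to keep the current key).
 */
static __rte_unused int
example_rss_ip_only(uint16_t port_id)
{
        struct rte_eth_rss_conf rss_conf = {
                .rss_key = NULL,
                .rss_hf = ETH_RSS_IP,
        };

        return rte_eth_dev_rss_hash_update(port_id, &rss_conf);
}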
3595
3596 int
3597 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
3598                                 struct rte_eth_udp_tunnel *udp_tunnel)
3599 {
3600         struct rte_eth_dev *dev;
3601
3602         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3603         if (udp_tunnel == NULL) {
3604                 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
3605                 return -EINVAL;
3606         }
3607
3608         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
3609                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
3610                 return -EINVAL;
3611         }
3612
3613         dev = &rte_eth_devices[port_id];
3614         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
3615         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
3616                                                                 udp_tunnel));
3617 }
3618
3619 int
3620 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
3621                                    struct rte_eth_udp_tunnel *udp_tunnel)
3622 {
3623         struct rte_eth_dev *dev;
3624
3625         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3626         dev = &rte_eth_devices[port_id];
3627
3628         if (udp_tunnel == NULL) {
3629                 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
3630                 return -EINVAL;
3631         }
3632
3633         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
3634                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
3635                 return -EINVAL;
3636         }
3637
3638         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
3639         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
3640                                                                 udp_tunnel));
3641 }
3642
3643 int
3644 rte_eth_led_on(uint16_t port_id)
3645 {
3646         struct rte_eth_dev *dev;
3647
3648         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3649         dev = &rte_eth_devices[port_id];
3650         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
3651         return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
3652 }
3653
3654 int
3655 rte_eth_led_off(uint16_t port_id)
3656 {
3657         struct rte_eth_dev *dev;
3658
3659         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3660         dev = &rte_eth_devices[port_id];
3661         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
3662         return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
3663 }
3664
3665 int
3666 rte_eth_fec_get_capability(uint16_t port_id,
3667                            struct rte_eth_fec_capa *speed_fec_capa,
3668                            unsigned int num)
3669 {
3670         struct rte_eth_dev *dev;
3671         int ret;
3672
3673         if (speed_fec_capa == NULL && num > 0)
3674                 return -EINVAL;
3675
3676         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3677         dev = &rte_eth_devices[port_id];
3678         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get_capability, -ENOTSUP);
3679         ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num);
3680
3681         return ret;
3682 }
3683
3684 int
3685 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa)
3686 {
3687         struct rte_eth_dev *dev;
3688
3689         if (fec_capa == NULL)
3690                 return -EINVAL;
3691
3692         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3693         dev = &rte_eth_devices[port_id];
3694         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get, -ENOTSUP);
3695         return eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa));
3696 }
3697
3698 int
3699 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa)
3700 {
3701         struct rte_eth_dev *dev;
3702
3703         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3704         dev = &rte_eth_devices[port_id];
3705         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_set, -ENOTSUP);
3706         return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa));
3707 }
3708
3709 /*
3710  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
3711  * an empty spot.
3712  */
3713 static int
3714 get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
3715 {
3716         struct rte_eth_dev_info dev_info;
3717         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3718         unsigned i;
3719         int ret;
3720
3721         ret = rte_eth_dev_info_get(port_id, &dev_info);
3722         if (ret != 0)
3723                 return -1;
3724
3725         for (i = 0; i < dev_info.max_mac_addrs; i++)
3726                 if (memcmp(addr, &dev->data->mac_addrs[i],
3727                                 RTE_ETHER_ADDR_LEN) == 0)
3728                         return i;
3729
3730         return -1;
3731 }
3732
3733 static const struct rte_ether_addr null_mac_addr;
3734
3735 int
3736 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
3737                         uint32_t pool)
3738 {
3739         struct rte_eth_dev *dev;
3740         int index;
3741         uint64_t pool_mask;
3742         int ret;
3743
3744         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3745         dev = &rte_eth_devices[port_id];
3746         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
3747
3748         if (rte_is_zero_ether_addr(addr)) {
3749                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
3750                         port_id);
3751                 return -EINVAL;
3752         }
3753         if (pool >= ETH_64_POOLS) {
3754                 RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", ETH_64_POOLS - 1);
3755                 return -EINVAL;
3756         }
3757
3758         index = get_mac_addr_index(port_id, addr);
3759         if (index < 0) {
3760                 index = get_mac_addr_index(port_id, &null_mac_addr);
3761                 if (index < 0) {
3762                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
3763                                 port_id);
3764                         return -ENOSPC;
3765                 }
3766         } else {
3767                 pool_mask = dev->data->mac_pool_sel[index];
3768
3769                 /* If both the MAC address and pool are already there, do nothing */
3770                 if (pool_mask & (1ULL << pool))
3771                         return 0;
3772         }
3773
3774         /* Update NIC */
3775         ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
3776
3777         if (ret == 0) {
3778                 /* Update address in NIC data structure */
3779                 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]);
3780
3781                 /* Update pool bitmap in NIC data structure */
3782                 dev->data->mac_pool_sel[index] |= (1ULL << pool);
3783         }
3784
3785         return eth_err(port_id, ret);
3786 }
3787
3788 int
3789 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr)
3790 {
3791         struct rte_eth_dev *dev;
3792         int index;
3793
3794         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3795         dev = &rte_eth_devices[port_id];
3796         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
3797
3798         index = get_mac_addr_index(port_id, addr);
3799         if (index == 0) {
3800                 RTE_ETHDEV_LOG(ERR,
3801                         "Port %u: Cannot remove default MAC address\n",
3802                         port_id);
3803                 return -EADDRINUSE;
3804         } else if (index < 0)
3805                 return 0;  /* Do nothing if address wasn't found */
3806
3807         /* Update NIC */
3808         (*dev->dev_ops->mac_addr_remove)(dev, index);
3809
3810         /* Update address in NIC data structure */
3811         rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
3812
3813         /* reset pool bitmap */
3814         dev->data->mac_pool_sel[index] = 0;
3815
3816         return 0;
3817 }
3818
3819 int
3820 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr)
3821 {
3822         struct rte_eth_dev *dev;
3823         int ret;
3824
3825         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3826
3827         if (!rte_is_valid_assigned_ether_addr(addr))
3828                 return -EINVAL;
3829
3830         dev = &rte_eth_devices[port_id];
3831         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
3832
3833         ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
3834         if (ret < 0)
3835                 return ret;
3836
3837         /* Update default address in NIC data structure */
3838         rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
3839
3840         return 0;
3841 }
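
/*
 * Illustrative sketch (hypothetical example_* helper): install a locally
 * administered unicast address as the default MAC; zero or multicast
 * addresses are rejected by rte_is_valid_assigned_ether_addr() above.
 */
static __rte_unused int
example_set_default_mac(uint16_t port_id)
{
        struct rte_ether_addr addr = {
                .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
        };

        return rte_eth_dev_default_mac_addr_set(port_id, &addr);
}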
3842
3843
3844 /*
3845  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
3846  * an empty spot.
3847  */
3848 static int
3849 get_hash_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
3850 {
3851         struct rte_eth_dev_info dev_info;
3852         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3853         unsigned i;
3854         int ret;
3855
3856         ret = rte_eth_dev_info_get(port_id, &dev_info);
3857         if (ret != 0)
3858                 return -1;
3859
3860         if (!dev->data->hash_mac_addrs)
3861                 return -1;
3862
3863         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
3864                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
3865                         RTE_ETHER_ADDR_LEN) == 0)
3866                         return i;
3867
3868         return -1;
3869 }
3870
3871 int
3872 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
3873                                 uint8_t on)
3874 {
3875         int index;
3876         int ret;
3877         struct rte_eth_dev *dev;
3878
3879         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3880
3881         dev = &rte_eth_devices[port_id];
3882         if (rte_is_zero_ether_addr(addr)) {
3883                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
3884                         port_id);
3885                 return -EINVAL;
3886         }
3887
3888         index = get_hash_mac_addr_index(port_id, addr);
3889         /* Check if it's already there, and do nothing */
3890         if ((index >= 0) && on)
3891                 return 0;
3892
3893         if (index < 0) {
3894                 if (!on) {
3895                         RTE_ETHDEV_LOG(ERR,
3896                                 "Port %u: the MAC address was not set in UTA\n",
3897                                 port_id);
3898                         return -EINVAL;
3899                 }
3900
3901                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
3902                 if (index < 0) {
3903                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
3904                                 port_id);
3905                         return -ENOSPC;
3906                 }
3907         }
3908
3909         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
3910         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
3911         if (ret == 0) {
3912                 /* Update address in NIC data structure */
3913                 if (on)
3914                         rte_ether_addr_copy(addr,
3915                                         &dev->data->hash_mac_addrs[index]);
3916                 else
3917                         rte_ether_addr_copy(&null_mac_addr,
3918                                         &dev->data->hash_mac_addrs[index]);
3919         }
3920
3921         return eth_err(port_id, ret);
3922 }
3923
3924 int
3925 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
3926 {
3927         struct rte_eth_dev *dev;
3928
3929         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3930
3931         dev = &rte_eth_devices[port_id];
3932
3933         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
3934         return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
3935                                                                        on));
3936 }
3937
3938 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
3939                                         uint16_t tx_rate)
3940 {
3941         struct rte_eth_dev *dev;
3942         struct rte_eth_dev_info dev_info;
3943         struct rte_eth_link link;
3944         int ret;
3945
3946         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3947
3948         ret = rte_eth_dev_info_get(port_id, &dev_info);
3949         if (ret != 0)
3950                 return ret;
3951
3952         dev = &rte_eth_devices[port_id];
3953         link = dev->data->dev_link;
3954
3955         if (queue_idx >= dev_info.max_tx_queues) {
3956                 RTE_ETHDEV_LOG(ERR,
3957                         "Set queue rate limit: port %u: invalid queue id=%u\n",
3958                         port_id, queue_idx);
3959                 return -EINVAL;
3960         }
3961
3962         if (tx_rate > link.link_speed) {
3963                 RTE_ETHDEV_LOG(ERR,
3964                         "Set queue rate limit: invalid tx_rate=%u, bigger than link speed %u\n",
3965                         tx_rate, link.link_speed);
3966                 return -EINVAL;
3967         }
3968
3969         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
3970         return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
3971                                                         queue_idx, tx_rate));
3972 }
3973
3974 int
3975 rte_eth_mirror_rule_set(uint16_t port_id,
3976                         struct rte_eth_mirror_conf *mirror_conf,
3977                         uint8_t rule_id, uint8_t on)
3978 {
3979         struct rte_eth_dev *dev;
3980
3981         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3982         if (mirror_conf->rule_type == 0) {
3983                 RTE_ETHDEV_LOG(ERR, "Mirror rule type cannot be 0\n");
3984                 return -EINVAL;
3985         }
3986
3987         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
3988                 RTE_ETHDEV_LOG(ERR, "Invalid dst pool, pool id must be 0-%d\n",
3989                         ETH_64_POOLS - 1);
3990                 return -EINVAL;
3991         }
3992
3993         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
3994              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
3995             (mirror_conf->pool_mask == 0)) {
3996                 RTE_ETHDEV_LOG(ERR,
3997                         "Invalid mirror pool, pool mask cannot be 0\n");
3998                 return -EINVAL;
3999         }
4000
4001         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
4002             mirror_conf->vlan.vlan_mask == 0) {
4003                 RTE_ETHDEV_LOG(ERR,
4004                         "Invalid vlan mask, vlan mask cannot be 0\n");
4005                 return -EINVAL;
4006         }
4007
4008         dev = &rte_eth_devices[port_id];
4009         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
4010
4011         return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
4012                                                 mirror_conf, rule_id, on));
4013 }
4014
4015 int
4016 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
4017 {
4018         struct rte_eth_dev *dev;
4019
4020         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4021
4022         dev = &rte_eth_devices[port_id];
4023         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
4024
4025         return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev,
4026                                                                    rule_id));
4027 }
4028
4029 RTE_INIT(eth_dev_init_cb_lists)
4030 {
4031         int i;
4032
4033         for (i = 0; i < RTE_MAX_ETHPORTS; i++)
4034                 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
4035 }
4036
4037 int
4038 rte_eth_dev_callback_register(uint16_t port_id,
4039                         enum rte_eth_event_type event,
4040                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4041 {
4042         struct rte_eth_dev *dev;
4043         struct rte_eth_dev_callback *user_cb;
4044         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
4045         uint16_t last_port;
4046
4047         if (!cb_fn)
4048                 return -EINVAL;
4049
4050         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4051                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
4052                 return -EINVAL;
4053         }
4054
4055         if (port_id == RTE_ETH_ALL) {
4056                 next_port = 0;
4057                 last_port = RTE_MAX_ETHPORTS - 1;
4058         } else {
4059                 next_port = last_port = port_id;
4060         }
4061
4062         rte_spinlock_lock(&rte_eth_dev_cb_lock);
4063
4064         do {
4065                 dev = &rte_eth_devices[next_port];
4066
4067                 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
4068                         if (user_cb->cb_fn == cb_fn &&
4069                                 user_cb->cb_arg == cb_arg &&
4070                                 user_cb->event == event) {
4071                                 break;
4072                         }
4073                 }
4074
4075                 /* create a new callback. */
4076                 if (user_cb == NULL) {
4077                         user_cb = rte_zmalloc("INTR_USER_CALLBACK",
4078                                 sizeof(struct rte_eth_dev_callback), 0);
4079                         if (user_cb != NULL) {
4080                                 user_cb->cb_fn = cb_fn;
4081                                 user_cb->cb_arg = cb_arg;
4082                                 user_cb->event = event;
4083                                 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
4084                                                   user_cb, next);
4085                         } else {
4086                                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
4087                                 rte_eth_dev_callback_unregister(port_id, event,
4088                                                                 cb_fn, cb_arg);
4089                                 return -ENOMEM;
4090                         }
4091
4092                 }
4093         } while (++next_port <= last_port);
4094
4095         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
4096         return 0;
4097 }
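
/*
 * Illustrative sketch (hypothetical example_* names): log link state
 * changes on every port. The callback runs in the interrupt thread while
 * the list lock is released, as rte_eth_dev_callback_process() below shows.
 */
static int
example_link_event_cb(uint16_t port_id, enum rte_eth_event_type event,
                void *cb_arg, void *ret_param)
{
        RTE_SET_USED(event);
        RTE_SET_USED(cb_arg);
        RTE_SET_USED(ret_param);

        printf("port %u: link state changed\n", port_id);
        return 0;
}

static __rte_unused int
example_watch_links(void)
{
        return rte_eth_dev_callback_register(RTE_ETH_ALL,
                        RTE_ETH_EVENT_INTR_LSC, example_link_event_cb, NULL);
}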
4098
4099 int
4100 rte_eth_dev_callback_unregister(uint16_t port_id,
4101                         enum rte_eth_event_type event,
4102                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4103 {
4104         int ret;
4105         struct rte_eth_dev *dev;
4106         struct rte_eth_dev_callback *cb, *next;
4107         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
4108         uint16_t last_port;
4109
4110         if (!cb_fn)
4111                 return -EINVAL;
4112
4113         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4114                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
4115                 return -EINVAL;
4116         }
4117
4118         if (port_id == RTE_ETH_ALL) {
4119                 next_port = 0;
4120                 last_port = RTE_MAX_ETHPORTS - 1;
4121         } else {
4122                 next_port = last_port = port_id;
4123         }
4124
4125         rte_spinlock_lock(&rte_eth_dev_cb_lock);
4126
4127         do {
4128                 dev = &rte_eth_devices[next_port];
4129                 ret = 0;
4130                 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
4131                      cb = next) {
4132
4133                         next = TAILQ_NEXT(cb, next);
4134
4135                         if (cb->cb_fn != cb_fn || cb->event != event ||
4136                             (cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
4137                                 continue;
4138
4139                         /*
4140                          * if this callback is not executing right now,
4141                          * then remove it.
4142                          */
4143                         if (cb->active == 0) {
4144                                 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
4145                                 rte_free(cb);
4146                         } else {
4147                                 ret = -EAGAIN;
4148                         }
4149                 }
4150         } while (++next_port <= last_port);
4151
4152         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
4153         return ret;
4154 }
4155
4156 int
4157 rte_eth_dev_callback_process(struct rte_eth_dev *dev,
4158         enum rte_eth_event_type event, void *ret_param)
4159 {
4160         struct rte_eth_dev_callback *cb_lst;
4161         struct rte_eth_dev_callback dev_cb;
4162         int rc = 0;
4163
4164         rte_spinlock_lock(&rte_eth_dev_cb_lock);
4165         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
4166                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
4167                         continue;
4168                 dev_cb = *cb_lst;
4169                 cb_lst->active = 1;
4170                 if (ret_param != NULL)
4171                         dev_cb.ret_param = ret_param;
4172
4173                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
4174                 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
4175                                 dev_cb.cb_arg, dev_cb.ret_param);
4176                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
4177                 cb_lst->active = 0;
4178         }
4179         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
4180         return rc;
4181 }
4182
4183 void
4184 rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
4185 {
4186         if (dev == NULL)
4187                 return;
4188
4189         rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);
4190
4191         dev->state = RTE_ETH_DEV_ATTACHED;
4192 }
4193
4194 int
4195 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
4196 {
4197         uint32_t vec;
4198         struct rte_eth_dev *dev;
4199         struct rte_intr_handle *intr_handle;
4200         uint16_t qid;
4201         int rc;
4202
4203         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4204
4205         dev = &rte_eth_devices[port_id];
4206
4207         if (!dev->intr_handle) {
4208                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4209                 return -ENOTSUP;
4210         }
4211
4212         intr_handle = dev->intr_handle;
4213         if (!intr_handle->intr_vec) {
4214                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4215                 return -EPERM;
4216         }
4217
4218         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
4219                 vec = intr_handle->intr_vec[qid];
4220                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4221                 if (rc && rc != -EEXIST) {
4222                         RTE_ETHDEV_LOG(ERR,
4223                                 "p %u q %u rx ctl error op %d epfd %d vec %u\n",
4224                                 port_id, qid, op, epfd, vec);
4225                 }
4226         }
4227
4228         return 0;
4229 }
4230
4231 int
4232 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
4233 {
4234         struct rte_intr_handle *intr_handle;
4235         struct rte_eth_dev *dev;
4236         unsigned int efd_idx;
4237         uint32_t vec;
4238         int fd;
4239
4240         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
4241
4242         dev = &rte_eth_devices[port_id];
4243
4244         if (queue_id >= dev->data->nb_rx_queues) {
4245                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4246                 return -1;
4247         }
4248
4249         if (!dev->intr_handle) {
4250                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4251                 return -1;
4252         }
4253
4254         intr_handle = dev->intr_handle;
4255         if (!intr_handle->intr_vec) {
4256                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4257                 return -1;
4258         }
4259
4260         vec = intr_handle->intr_vec[queue_id];
4261         efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
4262                 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
4263         fd = intr_handle->efds[efd_idx];
4264
4265         return fd;
4266 }
4267
4268 static inline int
4269 eth_dma_mzone_name(char *name, size_t len, uint16_t port_id, uint16_t queue_id,
4270                 const char *ring_name)
4271 {
4272         return snprintf(name, len, "eth_p%d_q%d_%s",
4273                         port_id, queue_id, ring_name);
4274 }
4275
4276 const struct rte_memzone *
4277 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
4278                          uint16_t queue_id, size_t size, unsigned align,
4279                          int socket_id)
4280 {
4281         char z_name[RTE_MEMZONE_NAMESIZE];
4282         const struct rte_memzone *mz;
4283         int rc;
4284
4285         rc = eth_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
4286                         queue_id, ring_name);
4287         if (rc >= RTE_MEMZONE_NAMESIZE) {
4288                 RTE_ETHDEV_LOG(ERR, "ring name too long\n");
4289                 rte_errno = ENAMETOOLONG;
4290                 return NULL;
4291         }
4292
4293         mz = rte_memzone_lookup(z_name);
4294         if (mz) {
4295                 if ((socket_id != SOCKET_ID_ANY && socket_id != mz->socket_id) ||
4296                                 size > mz->len ||
4297                                 ((uintptr_t)mz->addr & (align - 1)) != 0) {
4298                         RTE_ETHDEV_LOG(ERR,
4299                                 "memzone %s does not satisfy the requested attributes\n",
4300                                 mz->name);
4301                         return NULL;
4302                 }
4303
4304                 return mz;
4305         }
4306
4307         return rte_memzone_reserve_aligned(z_name, size, socket_id,
4308                         RTE_MEMZONE_IOVA_CONTIG, align);
4309 }
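
/*
 * Driver-side illustrative sketch (the example_* helper and "rx_ring"
 * name are hypothetical): reserve a descriptor ring for one Rx queue.
 * Re-running queue setup finds the existing zone via the lookup above
 * instead of reserving a new one.
 */
static __rte_unused const struct rte_memzone *
example_alloc_rx_ring(struct rte_eth_dev *dev, uint16_t queue_id,
                uint16_t nb_desc, size_t desc_size, int socket_id)
{
        return rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id,
                        nb_desc * desc_size, RTE_CACHE_LINE_SIZE, socket_id);
}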
4310
4311 int
4312 rte_eth_dma_zone_free(const struct rte_eth_dev *dev, const char *ring_name,
4313                 uint16_t queue_id)
4314 {
4315         char z_name[RTE_MEMZONE_NAMESIZE];
4316         const struct rte_memzone *mz;
4317         int rc = 0;
4318
4319         rc = eth_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
4320                         queue_id, ring_name);
4321         if (rc >= RTE_MEMZONE_NAMESIZE) {
4322                 RTE_ETHDEV_LOG(ERR, "ring name too long\n");
4323                 return -ENAMETOOLONG;
4324         }
4325
4326         mz = rte_memzone_lookup(z_name);
4327         if (mz)
4328                 rc = rte_memzone_free(mz);
4329         else
4330                 rc = -ENOENT;
4331
4332         return rc;
4333 }
4334
4335 int
4336 rte_eth_dev_create(struct rte_device *device, const char *name,
4337         size_t priv_data_size,
4338         ethdev_bus_specific_init ethdev_bus_specific_init,
4339         void *bus_init_params,
4340         ethdev_init_t ethdev_init, void *init_params)
4341 {
4342         struct rte_eth_dev *ethdev;
4343         int retval;
4344
4345         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL);
4346
4347         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
4348                 ethdev = rte_eth_dev_allocate(name);
4349                 if (!ethdev)
4350                         return -ENODEV;
4351
4352                 if (priv_data_size) {
4353                         ethdev->data->dev_private = rte_zmalloc_socket(
4354                                 name, priv_data_size, RTE_CACHE_LINE_SIZE,
4355                                 device->numa_node);
4356
4357                         if (!ethdev->data->dev_private) {
4358                                 RTE_ETHDEV_LOG(ERR,
4359                                         "failed to allocate private data\n");
4360                                 retval = -ENOMEM;
4361                                 goto probe_failed;
4362                         }
4363                 }
4364         } else {
4365                 ethdev = rte_eth_dev_attach_secondary(name);
4366                 if (!ethdev) {
4367                         RTE_ETHDEV_LOG(ERR,
4368                                 "secondary process attach failed, ethdev doesn't exist\n");
4369                         return  -ENODEV;
4370                 }
4371         }
4372
4373         ethdev->device = device;
4374
4375         if (ethdev_bus_specific_init) {
4376                 retval = ethdev_bus_specific_init(ethdev, bus_init_params);
4377                 if (retval) {
4378                         RTE_ETHDEV_LOG(ERR,
4379                                 "ethdev bus specific initialisation failed\n");
4380                         goto probe_failed;
4381                 }
4382         }
4383
4384         retval = ethdev_init(ethdev, init_params);
4385         if (retval) {
4386                 RTE_ETHDEV_LOG(ERR, "ethdev initialisation failed\n");
4387                 goto probe_failed;
4388         }
4389
4390         rte_eth_dev_probing_finish(ethdev);
4391
4392         return retval;
4393
4394 probe_failed:
4395         rte_eth_dev_release_port(ethdev);
4396         return retval;
4397 }
4398
4399 int
4400 rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
4401         ethdev_uninit_t ethdev_uninit)
4402 {
4403         int ret;
4404
4405         ethdev = rte_eth_dev_allocated(ethdev->data->name);
4406         if (!ethdev)
4407                 return -ENODEV;
4408
4409         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL);
4410
4411         ret = ethdev_uninit(ethdev);
4412         if (ret)
4413                 return ret;
4414
4415         return rte_eth_dev_release_port(ethdev);
4416 }
4417
4418 int
4419 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
4420                           int epfd, int op, void *data)
4421 {
4422         uint32_t vec;
4423         struct rte_eth_dev *dev;
4424         struct rte_intr_handle *intr_handle;
4425         int rc;
4426
4427         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4428
4429         dev = &rte_eth_devices[port_id];
4430         if (queue_id >= dev->data->nb_rx_queues) {
4431                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4432                 return -EINVAL;
4433         }
4434
4435         if (!dev->intr_handle) {
4436                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4437                 return -ENOTSUP;
4438         }
4439
4440         intr_handle = dev->intr_handle;
4441         if (!intr_handle->intr_vec) {
4442                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4443                 return -EPERM;
4444         }
4445
4446         vec = intr_handle->intr_vec[queue_id];
4447         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4448         if (rc && rc != -EEXIST) {
4449                 RTE_ETHDEV_LOG(ERR,
4450                         "p %u q %u rx ctl error op %d epfd %d vec %u\n",
4451                         port_id, queue_id, op, epfd, vec);
4452                 return rc;
4453         }
4454
4455         return 0;
4456 }
4457
4458 int
4459 rte_eth_dev_rx_intr_enable(uint16_t port_id,
4460                            uint16_t queue_id)
4461 {
4462         struct rte_eth_dev *dev;
4463
4464         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4465
4466         dev = &rte_eth_devices[port_id];
4467
4468         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
4469         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev,
4470                                                                 queue_id));
4471 }
4472
4473 int
4474 rte_eth_dev_rx_intr_disable(uint16_t port_id,
4475                             uint16_t queue_id)
4476 {
4477         struct rte_eth_dev *dev;
4478
4479         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4480
4481         dev = &rte_eth_devices[port_id];
4482
4483         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
4484         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev,
4485                                                                 queue_id));
4486 }
4487
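/*
 * Sketch of the intended call flow for the three Rx interrupt helpers
 * above (error handling omitted; "epfd" and "ev" are caller-owned, e.g.
 * from epoll_create() and a struct rte_epoll_event):
 *
 *        rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, epfd,
 *                                  RTE_INTR_EVENT_ADD, NULL);
 *        rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *        rte_epoll_wait(epfd, &ev, 1, -1);
 *        rte_eth_dev_rx_intr_disable(port_id, queue_id);
 *        ... drain the queue with rte_eth_rx_burst() ...
 */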
4488
4489 int
4490 rte_eth_dev_filter_supported(uint16_t port_id,
4491                              enum rte_filter_type filter_type)
4492 {
4493         struct rte_eth_dev *dev;
4494
4495         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4496
4497         dev = &rte_eth_devices[port_id];
4498         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
4499         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
4500                                 RTE_ETH_FILTER_NOP, NULL);
4501 }
4502
4503 int
4504 rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
4505                         enum rte_filter_op filter_op, void *arg)
4506 {
4507         struct rte_eth_dev *dev;
4508
4509         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4510
4511         dev = &rte_eth_devices[port_id];
4512         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
4513         return eth_err(port_id, (*dev->dev_ops->filter_ctrl)(dev, filter_type,
4514                                                              filter_op, arg));
4515 }
4516
4517 const struct rte_eth_rxtx_callback *
4518 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
4519                 rte_rx_callback_fn fn, void *user_param)
4520 {
4521 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4522         rte_errno = ENOTSUP;
4523         return NULL;
4524 #endif
4525         struct rte_eth_dev *dev;
4526
4527         /* check input parameters */
4528         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4529                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4530                 rte_errno = EINVAL;
4531                 return NULL;
4532         }
4533         dev = &rte_eth_devices[port_id];
4534         if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
4535                 rte_errno = EINVAL;
4536                 return NULL;
4537         }
4538         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4539
4540         if (cb == NULL) {
4541                 rte_errno = ENOMEM;
4542                 return NULL;
4543         }
4544
4545         cb->fn.rx = fn;
4546         cb->param = user_param;
4547
4548         rte_spinlock_lock(&rte_eth_rx_cb_lock);
4549         /* Add the callbacks in fifo order. */
4550         struct rte_eth_rxtx_callback *tail =
4551                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
4552
4553         if (!tail) {
4554                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
4555
4556         } else {
4557                 while (tail->next)
4558                         tail = tail->next;
4559                 tail->next = cb;
4560         }
4561         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
4562
4563         return cb;
4564 }
4565
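/*
 * A minimal sketch of a user Rx callback (the names "count_cb" and
 * "counter" are illustrative). The returned pointer is the handle later
 * passed to rte_eth_remove_rx_callback():
 *
 *        static uint16_t
 *        count_cb(uint16_t port, uint16_t queue, struct rte_mbuf *pkts[],
 *                 uint16_t nb_pkts, uint16_t max_pkts, void *user)
 *        {
 *                *(uint64_t *)user += nb_pkts;
 *                return nb_pkts;
 *        }
 *
 *        static uint64_t counter;
 *        const struct rte_eth_rxtx_callback *cb =
 *                rte_eth_add_rx_callback(port_id, 0, count_cb, &counter);
 */
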
4566 const struct rte_eth_rxtx_callback *
4567 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
4568                 rte_rx_callback_fn fn, void *user_param)
4569 {
4570 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4571         rte_errno = ENOTSUP;
4572         return NULL;
4573 #endif
4574         /* check input parameters */
4575         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4576                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4577                 rte_errno = EINVAL;
4578                 return NULL;
4579         }
4580
4581         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4582
4583         if (cb == NULL) {
4584                 rte_errno = ENOMEM;
4585                 return NULL;
4586         }
4587
4588         cb->fn.rx = fn;
4589         cb->param = user_param;
4590
4591         rte_spinlock_lock(&rte_eth_rx_cb_lock);
4592         /* Add the callbacks at first position */
4593         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
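             /*
              * Publish the new node's next pointer before the list head:
              * the Rx burst path walks this list without taking
              * rte_eth_rx_cb_lock.
              */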
4594         rte_smp_wmb();
4595         rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
4596         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
4597
4598         return cb;
4599 }
4600
4601 const struct rte_eth_rxtx_callback *
4602 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
4603                 rte_tx_callback_fn fn, void *user_param)
4604 {
4605 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4606         rte_errno = ENOTSUP;
4607         return NULL;
4608 #endif
4609         struct rte_eth_dev *dev;
4610
4611         /* check input parameters */
4612         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4613                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
4614                 rte_errno = EINVAL;
4615                 return NULL;
4616         }
4617
4618         dev = &rte_eth_devices[port_id];
4619         if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
4620                 rte_errno = EINVAL;
4621                 return NULL;
4622         }
4623
4624         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4625
4626         if (cb == NULL) {
4627                 rte_errno = ENOMEM;
4628                 return NULL;
4629         }
4630
4631         cb->fn.tx = fn;
4632         cb->param = user_param;
4633
4634         rte_spinlock_lock(&rte_eth_tx_cb_lock);
4635         /* Add the callbacks in fifo order. */
4636         struct rte_eth_rxtx_callback *tail =
4637                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
4638
4639         if (!tail) {
4640                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
4641
4642         } else {
4643                 while (tail->next)
4644                         tail = tail->next;
4645                 tail->next = cb;
4646         }
4647         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
4648
4649         return cb;
4650 }
4651
4652 int
4653 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
4654                 const struct rte_eth_rxtx_callback *user_cb)
4655 {
4656 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4657         return -ENOTSUP;
4658 #endif
4659         /* Check input parameters. */
4660         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
4661         if (user_cb == NULL ||
4662                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
4663                 return -EINVAL;
4664
4665         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4666         struct rte_eth_rxtx_callback *cb;
4667         struct rte_eth_rxtx_callback **prev_cb;
4668         int ret = -EINVAL;
4669
4670         rte_spinlock_lock(&rte_eth_rx_cb_lock);
4671         prev_cb = &dev->post_rx_burst_cbs[queue_id];
4672         for (; *prev_cb != NULL; prev_cb = &cb->next) {
4673                 cb = *prev_cb;
4674                 if (cb == user_cb) {
4675                         /* Remove the user cb from the callback list. */
4676                         *prev_cb = cb->next;
4677                         ret = 0;
4678                         break;
4679                 }
4680         }
4681         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
4682
4683         return ret;
4684 }
4685
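/*
 * Note for this and rte_eth_remove_tx_callback() below: on success the
 * callback is only unlinked from the queue's list; its memory is not
 * freed here. Per the API contract, the caller may release it only once
 * no data-path thread can still be executing it (for example after the
 * queues are stopped or after a sufficient grace period).
 */
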
4686 int
4687 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
4688                 const struct rte_eth_rxtx_callback *user_cb)
4689 {
4690 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4691         return -ENOTSUP;
4692 #endif
4693         /* Check input parameters. */
4694         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
4695         if (user_cb == NULL ||
4696                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
4697                 return -EINVAL;
4698
4699         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4700         int ret = -EINVAL;
4701         struct rte_eth_rxtx_callback *cb;
4702         struct rte_eth_rxtx_callback **prev_cb;
4703
4704         rte_spinlock_lock(&rte_eth_tx_cb_lock);
4705         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
4706         for (; *prev_cb != NULL; prev_cb = &cb->next) {
4707                 cb = *prev_cb;
4708                 if (cb == user_cb) {
4709                         /* Remove the user cb from the callback list. */
4710                         *prev_cb = cb->next;
4711                         ret = 0;
4712                         break;
4713                 }
4714         }
4715         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
4716
4717         return ret;
4718 }
4719
4720 int
4721 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4722         struct rte_eth_rxq_info *qinfo)
4723 {
4724         struct rte_eth_dev *dev;
4725
4726         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4727
4728         if (qinfo == NULL)
4729                 return -EINVAL;
4730
4731         dev = &rte_eth_devices[port_id];
4732         if (queue_id >= dev->data->nb_rx_queues) {
4733                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4734                 return -EINVAL;
4735         }
4736
4737         if (dev->data->rx_queues == NULL ||
4738                         dev->data->rx_queues[queue_id] == NULL) {
4739                 RTE_ETHDEV_LOG(ERR,
4740                                "Rx queue %"PRIu16" of device with port_id=%"
4741                                PRIu16" has not been setup\n",
4742                                queue_id, port_id);
4743                 return -EINVAL;
4744         }
4745
4746         if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
4747                 RTE_ETHDEV_LOG(INFO,
4748                         "Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
4749                         queue_id, port_id);
4750                 return -EINVAL;
4751         }
4752
4753         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
4754
4755         memset(qinfo, 0, sizeof(*qinfo));
4756         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
4757         return 0;
4758 }
4759
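/*
 * Usage sketch (assumes queue 0 of the port was set up earlier):
 *
 *        struct rte_eth_rxq_info qinfo;
 *
 *        if (rte_eth_rx_queue_info_get(port_id, 0, &qinfo) == 0)
 *                printf("ring size %u, scattered %u\n",
 *                       qinfo.nb_desc, qinfo.scattered_rx);
 */
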
4760 int
4761 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4762         struct rte_eth_txq_info *qinfo)
4763 {
4764         struct rte_eth_dev *dev;
4765
4766         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4767
4768         if (qinfo == NULL)
4769                 return -EINVAL;
4770
4771         dev = &rte_eth_devices[port_id];
4772         if (queue_id >= dev->data->nb_tx_queues) {
4773                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
4774                 return -EINVAL;
4775         }
4776
4777         if (dev->data->tx_queues == NULL ||
4778                         dev->data->tx_queues[queue_id] == NULL) {
4779                 RTE_ETHDEV_LOG(ERR,
4780                                "Tx queue %"PRIu16" of device with port_id=%"
4781                                PRIu16" has not been setup\n",
4782                                queue_id, port_id);
4783                 return -EINVAL;
4784         }
4785
4786         if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
4787                 RTE_ETHDEV_LOG(INFO,
4788                         "Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
4789                         queue_id, port_id);
4790                 return -EINVAL;
4791         }
4792
4793         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
4794
4795         memset(qinfo, 0, sizeof(*qinfo));
4796         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
4797
4798         return 0;
4799 }
4800
4801 int
4802 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
4803                           struct rte_eth_burst_mode *mode)
4804 {
4805         struct rte_eth_dev *dev;
4806
4807         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4808
4809         if (mode == NULL)
4810                 return -EINVAL;
4811
4812         dev = &rte_eth_devices[port_id];
4813
4814         if (queue_id >= dev->data->nb_rx_queues) {
4815                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4816                 return -EINVAL;
4817         }
4818
4819         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP);
4820         memset(mode, 0, sizeof(*mode));
4821         return eth_err(port_id,
4822                        dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode));
4823 }
4824
4825 int
4826 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
4827                           struct rte_eth_burst_mode *mode)
4828 {
4829         struct rte_eth_dev *dev;
4830
4831         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4832
4833         if (mode == NULL)
4834                 return -EINVAL;
4835
4836         dev = &rte_eth_devices[port_id];
4837
4838         if (queue_id >= dev->data->nb_tx_queues) {
4839                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
4840                 return -EINVAL;
4841         }
4842
4843         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP);
4844         memset(mode, 0, sizeof(*mode));
4845         return eth_err(port_id,
4846                        dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode));
4847 }
4848
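/*
 * Sketch for the two burst-mode getters above; when a driver implements
 * the op it fills mode->info with a human-readable description:
 *
 *        struct rte_eth_burst_mode mode;
 *
 *        if (rte_eth_rx_burst_mode_get(port_id, 0, &mode) == 0)
 *                printf("Rx burst mode: %s\n", mode.info);
 */
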
4849 int
4850 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
4851                              struct rte_ether_addr *mc_addr_set,
4852                              uint32_t nb_mc_addr)
4853 {
4854         struct rte_eth_dev *dev;
4855
4856         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4857
4858         dev = &rte_eth_devices[port_id];
4859         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
4860         return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
4861                                                 mc_addr_set, nb_mc_addr));
4862 }
4863
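/*
 * Sketch (the group address is illustrative); per the API, passing
 * NULL/0 flushes the multicast filter list:
 *
 *        struct rte_ether_addr mc = {
 *                .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } };
 *
 *        rte_eth_dev_set_mc_addr_list(port_id, &mc, 1);
 *        ...
 *        rte_eth_dev_set_mc_addr_list(port_id, NULL, 0);
 */
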
4864 int
4865 rte_eth_timesync_enable(uint16_t port_id)
4866 {
4867         struct rte_eth_dev *dev;
4868
4869         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4870         dev = &rte_eth_devices[port_id];
4871
4872         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
4873         return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
4874 }
4875
4876 int
4877 rte_eth_timesync_disable(uint16_t port_id)
4878 {
4879         struct rte_eth_dev *dev;
4880
4881         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4882         dev = &rte_eth_devices[port_id];
4883
4884         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
4885         return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
4886 }
4887
4888 int
4889 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
4890                                    uint32_t flags)
4891 {
4892         struct rte_eth_dev *dev;
4893
4894         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4895         dev = &rte_eth_devices[port_id];
4896
4897         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
4898         return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
4899                                 (dev, timestamp, flags));
4900 }
4901
4902 int
4903 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
4904                                    struct timespec *timestamp)
4905 {
4906         struct rte_eth_dev *dev;
4907
4908         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4909         dev = &rte_eth_devices[port_id];
4910
4911         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
4912         return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
4913                                 (dev, timestamp));
4914 }
4915
4916 int
4917 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
4918 {
4919         struct rte_eth_dev *dev;
4920
4921         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4922         dev = &rte_eth_devices[port_id];
4923
4924         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
4925         return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev,
4926                                                                       delta));
4927 }
4928
4929 int
4930 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
4931 {
4932         struct rte_eth_dev *dev;
4933
4934         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4935         dev = &rte_eth_devices[port_id];
4936
4937         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
4938         return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
4939                                                                 timestamp));
4940 }
4941
4942 int
4943 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
4944 {
4945         struct rte_eth_dev *dev;
4946
4947         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4948         dev = &rte_eth_devices[port_id];
4949
4950         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
4951         return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
4952                                                                 timestamp));
4953 }
4954
4955 int
4956 rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
4957 {
4958         struct rte_eth_dev *dev;
4959
4960         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4961         dev = &rte_eth_devices[port_id];
4962
4963         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP);
4964         return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock));
4965 }
4966
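/*
 * Minimal sketch for the timesync group above (error handling omitted;
 * the Rx read's flags argument is a driver-specific timestamp selector,
 * 0 here, and rte_eth_timesync_adjust_time() takes a delta in
 * nanoseconds):
 *
 *        struct timespec ts;
 *
 *        rte_eth_timesync_enable(port_id);
 *        if (rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0) == 0)
 *                printf("rx ts %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
 *        rte_eth_timesync_adjust_time(port_id, -1000);
 *        rte_eth_timesync_disable(port_id);
 */
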
4967 int
4968 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
4969 {
4970         struct rte_eth_dev *dev;
4971
4972         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4973
4974         dev = &rte_eth_devices[port_id];
4975         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
4976         return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
4977 }
4978
4979 int
4980 rte_eth_dev_get_eeprom_length(uint16_t port_id)
4981 {
4982         struct rte_eth_dev *dev;
4983
4984         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4985
4986         dev = &rte_eth_devices[port_id];
4987         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
4988         return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
4989 }
4990
4991 int
4992 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
4993 {
4994         struct rte_eth_dev *dev;
4995
4996         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4997
4998         dev = &rte_eth_devices[port_id];
4999         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
5000         return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
5001 }
5002
5003 int
5004 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
5005 {
5006         struct rte_eth_dev *dev;
5007
5008         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5009
5010         dev = &rte_eth_devices[port_id];
5011         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
5012         return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
5013 }
5014
5015 int
5016 rte_eth_dev_get_module_info(uint16_t port_id,
5017                             struct rte_eth_dev_module_info *modinfo)
5018 {
5019         struct rte_eth_dev *dev;
5020
5021         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5022
5023         dev = &rte_eth_devices[port_id];
5024         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP);
5025         return (*dev->dev_ops->get_module_info)(dev, modinfo);
5026 }
5027
5028 int
5029 rte_eth_dev_get_module_eeprom(uint16_t port_id,
5030                               struct rte_dev_eeprom_info *info)
5031 {
5032         struct rte_eth_dev *dev;
5033
5034         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5035
5036         dev = &rte_eth_devices[port_id];
5037         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP);
5038         return (*dev->dev_ops->get_module_eeprom)(dev, info);
5039 }
5040
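/*
 * Sketch: query the plugged module's info, then read its EEPROM, sizing
 * the buffer from the reported eeprom_len (error handling trimmed):
 *
 *        struct rte_eth_dev_module_info modinfo;
 *        struct rte_dev_eeprom_info info;
 *
 *        if (rte_eth_dev_get_module_info(port_id, &modinfo) == 0) {
 *                info.offset = 0;
 *                info.length = modinfo.eeprom_len;
 *                info.data = malloc(info.length);
 *                if (info.data != NULL) {
 *                        rte_eth_dev_get_module_eeprom(port_id, &info);
 *                        free(info.data);
 *                }
 *        }
 */
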
5041 int
5042 rte_eth_dev_get_dcb_info(uint16_t port_id,
5043                              struct rte_eth_dcb_info *dcb_info)
5044 {
5045         struct rte_eth_dev *dev;
5046
5047         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5048
5049
             if (dcb_info == NULL)
                     return -EINVAL;

5050         dev = &rte_eth_devices[port_id];
5051         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
5052         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
5053         return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
5054 }
5055
5056 int
5057 rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
5058                                     struct rte_eth_l2_tunnel_conf *l2_tunnel)
5059 {
5060         struct rte_eth_dev *dev;
5061
5062         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5063         if (l2_tunnel == NULL) {
5064                 RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n");
5065                 return -EINVAL;
5066         }
5067
5068         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
5069                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
5070                 return -EINVAL;
5071         }
5072
5073         dev = &rte_eth_devices[port_id];
5074         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
5075                                 -ENOTSUP);
5076         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev,
5077                                                                 l2_tunnel));
5078 }
5079
5080 int
5081 rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
5082                                   struct rte_eth_l2_tunnel_conf *l2_tunnel,
5083                                   uint32_t mask,
5084                                   uint8_t en)
5085 {
5086         struct rte_eth_dev *dev;
5087
5088         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5089
5090         if (l2_tunnel == NULL) {
5091                 RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n");
5092                 return -EINVAL;
5093         }
5094
5095         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
5096                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
5097                 return -EINVAL;
5098         }
5099
5100         if (mask == 0) {
5101                 RTE_ETHDEV_LOG(ERR, "Mask should have a value\n");
5102                 return -EINVAL;
5103         }
5104
5105         dev = &rte_eth_devices[port_id];
5106         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
5107                                 -ENOTSUP);
5108         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_offload_set)(dev,
5109                                                         l2_tunnel, mask, en));
5110 }
5111
5112 static void
5113 rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc,
5114                            const struct rte_eth_desc_lim *desc_lim)
5115 {
5116         if (desc_lim->nb_align != 0)
5117                 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
5118
5119         if (desc_lim->nb_max != 0)
5120                 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
5121
5122         *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
5123 }
5124
5125 int
5126 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
5127                                  uint16_t *nb_rx_desc,
5128                                  uint16_t *nb_tx_desc)
5129 {
5130         struct rte_eth_dev_info dev_info;
5131         int ret;
5132
5133         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5134
5135         ret = rte_eth_dev_info_get(port_id, &dev_info);
5136         if (ret != 0)
5137                 return ret;
5138
5139         if (nb_rx_desc != NULL)
5140                 rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
5141
5142         if (nb_tx_desc != NULL)
5143                 rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
5144
5145         return 0;
5146 }
5147
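/*
 * Typical setup-time usage: clamp the requested ring sizes to the device
 * limits before queue setup (a sketch; "mb_pool" and "socket_id" are
 * caller-provided):
 *
 *        uint16_t nb_rxd = 1024, nb_txd = 1024;
 *
 *        if (rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd,
 *                                             &nb_txd) == 0) {
 *                rte_eth_rx_queue_setup(port_id, 0, nb_rxd, socket_id,
 *                                       NULL, mb_pool);
 *                rte_eth_tx_queue_setup(port_id, 0, nb_txd, socket_id,
 *                                       NULL);
 *        }
 */
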
5148 int
5149 rte_eth_dev_hairpin_capability_get(uint16_t port_id,
5150                                    struct rte_eth_hairpin_cap *cap)
5151 {
5152         struct rte_eth_dev *dev;
5153
5154         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5155
5156         dev = &rte_eth_devices[port_id];
5157         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP);
5158         memset(cap, 0, sizeof(*cap));
5159         return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap));
5160 }
5161
5162 int
5163 rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
5164 {
5165         if (dev->data->rx_queue_state[queue_id] ==
5166             RTE_ETH_QUEUE_STATE_HAIRPIN)
5167                 return 1;
5168         return 0;
5169 }
5170
5171 int
5172 rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
5173 {
5174         if (dev->data->tx_queue_state[queue_id] ==
5175             RTE_ETH_QUEUE_STATE_HAIRPIN)
5176                 return 1;
5177         return 0;
5178 }
5179
5180 int
5181 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
5182 {
5183         struct rte_eth_dev *dev;
5184
5185         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5186
5187         if (pool == NULL)
5188                 return -EINVAL;
5189
5190         dev = &rte_eth_devices[port_id];
5191
5192         if (*dev->dev_ops->pool_ops_supported == NULL)
5193                 return 1; /* all pools are supported */
5194
5195         return (*dev->dev_ops->pool_ops_supported)(dev, pool);
5196 }
5197
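/*
 * Sketch: probe whether a mempool ops name suits this port before
 * creating Rx pools ("ring_mp_mc" is just an example name); per the API
 * contract, 0 means best fit, 1 means supported, negative means not:
 *
 *        if (rte_eth_dev_pool_ops_supported(port_id, "ring_mp_mc") >= 0)
 *                ... create the port's pools with that ops ...
 */
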
5198 /**
5199  * A set of values to describe the possible states of a switch domain.
5200  */
5201 enum rte_eth_switch_domain_state {
5202         RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
5203         RTE_ETH_SWITCH_DOMAIN_ALLOCATED
5204 };
5205
5206 /**
5207  * Array of switch domains available for allocation. Array is sized to
5208  * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
5209  * ethdev ports in a single process.
5210  */
5211 static struct rte_eth_dev_switch {
5212         enum rte_eth_switch_domain_state state;
5213 } rte_eth_switch_domains[RTE_MAX_ETHPORTS];
5214
5215 int
5216 rte_eth_switch_domain_alloc(uint16_t *domain_id)
5217 {
5218         unsigned int i;
5219
5220         *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
5221
5222         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
5223                 if (rte_eth_switch_domains[i].state ==
5224                         RTE_ETH_SWITCH_DOMAIN_UNUSED) {
5225                         rte_eth_switch_domains[i].state =
5226                                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
5227                         *domain_id = i;
5228                         return 0;
5229                 }
5230         }
5231
5232         return -ENOSPC;
5233 }
5234
5235 int
5236 rte_eth_switch_domain_free(uint16_t domain_id)
5237 {
5238         if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
5239                 domain_id >= RTE_MAX_ETHPORTS)
5240                 return -EINVAL;
5241
5242         if (rte_eth_switch_domains[domain_id].state !=
5243                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
5244                 return -EINVAL;
5245
5246         rte_eth_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;
5247
5248         return 0;
5249 }
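
/*
 * Sketch: a PF driver usually allocates one switch domain at probe time
 * and frees it on removal; its representor ports share that domain id:
 *
 *        uint16_t domain_id;
 *
 *        if (rte_eth_switch_domain_alloc(&domain_id) == 0) {
 *                ... create representors carrying domain_id ...
 *                rte_eth_switch_domain_free(domain_id);
 *        }
 */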
5250
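/*
 * Split "key=value,key=[v1,v2],..." into arglist->pairs in place.
 * A four-state scanner: state 0 expects a key, state 1 consumes the key
 * up to '=', state 2 consumes the value up to ',' or the terminating
 * '\0', and state 3 skips over a bracketed list so embedded commas do
 * not end the value.
 */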
5251 static int
5252 rte_eth_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
5253 {
5254         int state;
5255         struct rte_kvargs_pair *pair;
5256         char *letter;
5257
5258         arglist->str = strdup(str_in);
5259         if (arglist->str == NULL)
5260                 return -ENOMEM;
5261
5262         letter = arglist->str;
5263         state = 0;
5264         arglist->count = 0;
5265         pair = &arglist->pairs[0];
5266         while (1) {
5267                 switch (state) {
5268                 case 0: /* Initial */
5269                         if (*letter == '=')
5270                                 return -EINVAL;
5271                         else if (*letter == '\0')
5272                                 return 0;
5273
5274                         state = 1;
5275                         pair->key = letter;
5276                         /* fall-thru */
5277
5278                 case 1: /* Parsing key */
5279                         if (*letter == '=') {
5280                                 *letter = '\0';
5281                                 pair->value = letter + 1;
5282                                 state = 2;
5283                         } else if (*letter == ',' || *letter == '\0')
5284                                 return -EINVAL;
5285                         break;
5286
5287
5288                 case 2: /* Parsing value */
5289                         if (*letter == '[')
5290                                 state = 3;
5291                         else if (*letter == ',') {
5292                                 *letter = '\0';
5293                                 arglist->count++;
5294                                 pair = &arglist->pairs[arglist->count];
5295                                 state = 0;
5296                         } else if (*letter == '\0') {
5297                                 letter--;
5298                                 arglist->count++;
5299                                 pair = &arglist->pairs[arglist->count];
5300                                 state = 0;
5301                         }
5302                         break;
5303
5304                 case 3: /* Parsing list */
5305                         if (*letter == ']')
5306                                 state = 2;
5307                         else if (*letter == '\0')
5308                                 return -EINVAL;
5309                         break;
5310                 }
5311                 letter++;
5312         }
5313 }
5314
5315 int
5316 rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
5317 {
5318         struct rte_kvargs args;
5319         struct rte_kvargs_pair *pair;
5320         unsigned int i;
5321         int result = 0;
5322
5323         memset(eth_da, 0, sizeof(*eth_da));
5324
5325         result = rte_eth_devargs_tokenise(&args, dargs);
5326         if (result < 0)
5327                 goto parse_cleanup;
5328
5329         for (i = 0; i < args.count; i++) {
5330                 pair = &args.pairs[i];
5331                 if (strcmp("representor", pair->key) == 0) {
5332                         result = rte_eth_devargs_parse_list(pair->value,
5333                                 rte_eth_devargs_parse_representor_ports,
5334                                 eth_da);
5335                         if (result < 0)
5336                                 goto parse_cleanup;
5337                 }
5338         }
5339
5340 parse_cleanup:
5341         if (args.str)
5342                 free(args.str);
5343
5344         return result;
5345 }
5346
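/*
 * Usage sketch (the devargs string is illustrative):
 *
 *        struct rte_eth_devargs eth_da;
 *
 *        if (rte_eth_devargs_parse("representor=[0-3]", &eth_da) == 0)
 *                printf("%u representor ports\n",
 *                       eth_da.nb_representor_ports);
 */
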
5347 static int
5348 handle_port_list(const char *cmd __rte_unused,
5349                 const char *params __rte_unused,
5350                 struct rte_tel_data *d)
5351 {
5352         int port_id;
5353
5354         rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
5355         RTE_ETH_FOREACH_DEV(port_id)
5356                 rte_tel_data_add_array_int(d, port_id);
5357         return 0;
5358 }
5359
5360 static void
5361 add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats,
5362                 const char *stat_name)
5363 {
5364         int q;
5365         struct rte_tel_data *q_data = rte_tel_data_alloc();
             /* guard against allocation failure before filling the array */
             if (q_data == NULL)
                     return;
5366         rte_tel_data_start_array(q_data, RTE_TEL_U64_VAL);
5367         for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++)
5368                 rte_tel_data_add_array_u64(q_data, q_stats[q]);
5369         rte_tel_data_add_dict_container(d, stat_name, q_data, 0);
5370 }
5371
5372 #define ADD_DICT_STAT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s)
5373
5374 static int
5375 handle_port_stats(const char *cmd __rte_unused,
5376                 const char *params,
5377                 struct rte_tel_data *d)
5378 {
5379         struct rte_eth_stats stats;
5380         int port_id, ret;
5381
5382         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5383                 return -1;
5384
5385         port_id = atoi(params);
5386         if (!rte_eth_dev_is_valid_port(port_id))
5387                 return -1;
5388
5389         ret = rte_eth_stats_get(port_id, &stats);
5390         if (ret < 0)
5391                 return -1;
5392
5393         rte_tel_data_start_dict(d);
5394         ADD_DICT_STAT(stats, ipackets);
5395         ADD_DICT_STAT(stats, opackets);
5396         ADD_DICT_STAT(stats, ibytes);
5397         ADD_DICT_STAT(stats, obytes);
5398         ADD_DICT_STAT(stats, imissed);
5399         ADD_DICT_STAT(stats, ierrors);
5400         ADD_DICT_STAT(stats, oerrors);
5401         ADD_DICT_STAT(stats, rx_nombuf);
5402         add_port_queue_stats(d, stats.q_ipackets, "q_ipackets");
5403         add_port_queue_stats(d, stats.q_opackets, "q_opackets");
5404         add_port_queue_stats(d, stats.q_ibytes, "q_ibytes");
5405         add_port_queue_stats(d, stats.q_obytes, "q_obytes");
5406         add_port_queue_stats(d, stats.q_errors, "q_errors");
5407
5408         return 0;
5409 }
5410
5411 static int
5412 handle_port_xstats(const char *cmd __rte_unused,
5413                 const char *params,
5414                 struct rte_tel_data *d)
5415 {
5416         struct rte_eth_xstat *eth_xstats;
5417         struct rte_eth_xstat_name *xstat_names;
5418         int port_id, num_xstats;
5419         int i, ret;
5420         char *end_param;
5421
5422         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5423                 return -1;
5424
5425         port_id = strtoul(params, &end_param, 0);
5426         if (*end_param != '\0')
5427                 RTE_ETHDEV_LOG(NOTICE,
5428                         "Extra parameters passed to ethdev telemetry command, ignoring\n");
5429         if (!rte_eth_dev_is_valid_port(port_id))
5430                 return -1;
5431
5432         num_xstats = rte_eth_xstats_get(port_id, NULL, 0);
5433         if (num_xstats < 0)
5434                 return -1;
5435
5436         /* use one malloc for both names and stats */
5437         eth_xstats = malloc((sizeof(struct rte_eth_xstat) +
5438                         sizeof(struct rte_eth_xstat_name)) * num_xstats);
5439         if (eth_xstats == NULL)
5440                 return -1;
5441         xstat_names = (void *)&eth_xstats[num_xstats];
5442
5443         ret = rte_eth_xstats_get_names(port_id, xstat_names, num_xstats);
5444         if (ret < 0 || ret > num_xstats) {
5445                 free(eth_xstats);
5446                 return -1;
5447         }
5448
5449         ret = rte_eth_xstats_get(port_id, eth_xstats, num_xstats);
5450         if (ret < 0 || ret > num_xstats) {
5451                 free(eth_xstats);
5452                 return -1;
5453         }
5454
5455         rte_tel_data_start_dict(d);
5456         for (i = 0; i < num_xstats; i++)
5457                 rte_tel_data_add_dict_u64(d, xstat_names[i].name,
5458                                 eth_xstats[i].value);
5459         return 0;
5460 }
5461
5462 static int
5463 handle_port_link_status(const char *cmd __rte_unused,
5464                 const char *params,
5465                 struct rte_tel_data *d)
5466 {
5467         static const char *status_str = "status";
5468         int ret, port_id;
5469         struct rte_eth_link link;
5470         char *end_param;
5471
5472         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5473                 return -1;
5474
5475         port_id = strtoul(params, &end_param, 0);
5476         if (*end_param != '\0')
5477                 RTE_ETHDEV_LOG(NOTICE,
5478                         "Extra parameters passed to ethdev telemetry command, ignoring\n");
5479         if (!rte_eth_dev_is_valid_port(port_id))
5480                 return -1;
5481
5482         ret = rte_eth_link_get(port_id, &link);
5483         if (ret < 0)
5484                 return -1;
5485
5486         rte_tel_data_start_dict(d);
5487         if (!link.link_status) {
5488                 rte_tel_data_add_dict_string(d, status_str, "DOWN");
5489                 return 0;
5490         }
5491         rte_tel_data_add_dict_string(d, status_str, "UP");
5492         rte_tel_data_add_dict_u64(d, "speed", link.link_speed);
5493         rte_tel_data_add_dict_string(d, "duplex",
5494                         (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
5495                                 "full-duplex" : "half-duplex");
5496         return 0;
5497 }
5498
5499 RTE_LOG_REGISTER(rte_eth_dev_logtype, lib.ethdev, INFO);
5500
5501 RTE_INIT(ethdev_init_telemetry)
5502 {
5503         rte_telemetry_register_cmd("/ethdev/list", handle_port_list,
5504                         "Returns list of available ethdev ports. Takes no parameters");
5505         rte_telemetry_register_cmd("/ethdev/stats", handle_port_stats,
5506                         "Returns the common stats for a port. Parameters: int port_id");
5507         rte_telemetry_register_cmd("/ethdev/xstats", handle_port_xstats,
5508                         "Returns the extended stats for a port. Parameters: int port_id");
5509         rte_telemetry_register_cmd("/ethdev/link_status",
5510                         handle_port_link_status,
5511                         "Returns the link status for a port. Parameters: int port_id");
5512 }
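
/*
 * The commands registered above can be exercised with the telemetry
 * client shipped in the DPDK tree (paths and output illustrative):
 *
 *        $ ./usertools/dpdk-telemetry.py
 *        --> /ethdev/stats,0
 */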