/* lib/ethdev/rte_ethdev.c (dpdk.git, commit d5adf4ff05758ecaf6c6b408831e0adde80415ea) */
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <ctype.h>
#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_kvargs.h>
#include <rte_class.h>
#include <rte_ether.h>
#include <rte_telemetry.h>

#include "rte_ethdev_trace.h"
#include "rte_ethdev.h"
#include "ethdev_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* spinlock for eth device callbacks */
static rte_spinlock_t eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove rx callbacks */
static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove tx callbacks */
static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for shared data allocation */
static rte_spinlock_t eth_dev_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* Store statistics names and their offsets in the stats structure. */
struct rte_eth_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned int offset;
};

/* Shared memory between primary and secondary processes. */
static struct {
        uint64_t next_owner_id;
        rte_spinlock_t ownership_lock;
        struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
} *eth_dev_shared_data;

static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = {
        {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
        {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
        {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
        {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
        {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
        {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
        {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
        {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
                rx_nombuf)},
};

#define RTE_NB_STATS RTE_DIM(eth_dev_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_rxq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
        {"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS RTE_DIM(eth_dev_rxq_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_opackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};

#define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)
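
/*
 * Note on the tables above (illustrative sketch, not part of the upstream
 * file): each entry pairs a display name with the field's byte offset
 * inside struct rte_eth_stats, so generic code can read any basic stat
 * without naming the field:
 *
 *      struct rte_eth_stats stats;
 *      unsigned int i;
 *      uint64_t val;
 *
 *      rte_eth_stats_get(port_id, &stats);
 *      for (i = 0; i < RTE_NB_STATS; i++) {
 *              val = *(const uint64_t *)((const char *)&stats +
 *                              eth_dev_stats_strings[i].offset);
 *              printf("%s: %" PRIu64 "\n",
 *                              eth_dev_stats_strings[i].name, val);
 *      }
 */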

#define RTE_RX_OFFLOAD_BIT2STR(_name)   \
        { DEV_RX_OFFLOAD_##_name, #_name }

#define RTE_ETH_RX_OFFLOAD_BIT2STR(_name)       \
        { RTE_ETH_RX_OFFLOAD_##_name, #_name }

static const struct {
        uint64_t offload;
        const char *name;
} eth_dev_rx_offload_names[] = {
        RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
        RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
        RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
        RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
        RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
        RTE_RX_OFFLOAD_BIT2STR(SCATTER),
        RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
        RTE_RX_OFFLOAD_BIT2STR(SECURITY),
        RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
        RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
        RTE_ETH_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
};

#undef RTE_RX_OFFLOAD_BIT2STR
#undef RTE_ETH_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name)   \
        { DEV_TX_OFFLOAD_##_name, #_name }

static const struct {
        uint64_t offload;
        const char *name;
} eth_dev_tx_offload_names[] = {
        RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
        RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
        RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
        RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
        RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
        RTE_TX_OFFLOAD_BIT2STR(SECURITY),
        RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
};

#undef RTE_TX_OFFLOAD_BIT2STR

/**
 * The user application callback description.
 *
 * It contains the callback address registered by the user application,
 * the pointer to the callback's parameters, and the event type.
 */
struct rte_eth_dev_callback {
        TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
        rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
        void *cb_arg;                           /**< Parameter for callback */
        void *ret_param;                        /**< Return parameter */
        enum rte_eth_event_type event;          /**< Interrupt event type */
        uint32_t active;                        /**< Callback is executing */
};

enum {
        STAT_QMAP_TX = 0,
        STAT_QMAP_RX
};

int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
        int ret;
        struct rte_devargs devargs;
        const char *bus_param_key;
        char *bus_str = NULL;
        char *cls_str = NULL;
        int str_size;

        memset(iter, 0, sizeof(*iter));
        memset(&devargs, 0, sizeof(devargs));

        /*
         * The devargs string may use various syntaxes:
         *   - 0000:08:00.0,representor=[1-3]
         *   - pci:0000:06:00.0,representor=[0,5]
         *   - class=eth,mac=00:11:22:33:44:55
         * A new syntax is in development (not yet supported):
         *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
         */

        /*
         * Handle pure class filter (i.e. without any bus-level argument),
         * from future new syntax.
         * rte_devargs_parse() is not yet supporting the new syntax,
         * that's why this simple case is temporarily parsed here.
         */
#define iter_anybus_str "class=eth,"
        if (strncmp(devargs_str, iter_anybus_str,
                        strlen(iter_anybus_str)) == 0) {
                iter->cls_str = devargs_str + strlen(iter_anybus_str);
                goto end;
        }

        /* Split bus, device and parameters. */
        ret = rte_devargs_parse(&devargs, devargs_str);
        if (ret != 0)
                goto error;

        /*
         * Assume parameters of old syntax can match only at ethdev level.
         * Extra parameters will be ignored, thanks to "+" prefix.
         */
        str_size = strlen(devargs.args) + 2;
        cls_str = malloc(str_size);
        if (cls_str == NULL) {
                ret = -ENOMEM;
                goto error;
        }
        ret = snprintf(cls_str, str_size, "+%s", devargs.args);
        if (ret != str_size - 1) {
                ret = -EINVAL;
                goto error;
        }
        iter->cls_str = cls_str;

        iter->bus = devargs.bus;
        if (iter->bus->dev_iterate == NULL) {
                ret = -ENOTSUP;
                goto error;
        }

        /* Convert bus args to new syntax for use with new API dev_iterate. */
        if (strcmp(iter->bus->name, "vdev") == 0) {
                bus_param_key = "name";
        } else if (strcmp(iter->bus->name, "pci") == 0) {
                bus_param_key = "addr";
        } else {
                ret = -ENOTSUP;
                goto error;
        }
        str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
        bus_str = malloc(str_size);
        if (bus_str == NULL) {
                ret = -ENOMEM;
                goto error;
        }
        ret = snprintf(bus_str, str_size, "%s=%s",
                        bus_param_key, devargs.name);
        if (ret != str_size - 1) {
                ret = -EINVAL;
                goto error;
        }
        iter->bus_str = bus_str;

end:
        iter->cls = rte_class_find_by_name("eth");
        rte_devargs_reset(&devargs);
        return 0;

error:
        if (ret == -ENOTSUP)
                RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n",
                                iter->bus->name);
        rte_devargs_reset(&devargs);
        free(bus_str);
        free(cls_str);
        return ret;
}

uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
        if (iter->cls == NULL) /* invalid ethdev iterator */
                return RTE_MAX_ETHPORTS;

        do { /* loop to try all matching rte_device */
                /* If not pure ethdev filter and */
                if (iter->bus != NULL &&
                                /* not in middle of rte_eth_dev iteration, */
                                iter->class_device == NULL) {
                        /* get next rte_device to try. */
                        iter->device = iter->bus->dev_iterate(
                                        iter->device, iter->bus_str, iter);
                        if (iter->device == NULL)
                                break; /* no more rte_device candidate */
                }
                /* A device is matching bus part, need to check ethdev part. */
                iter->class_device = iter->cls->dev_iterate(
                                iter->class_device, iter->cls_str, iter);
                if (iter->class_device != NULL)
                        return eth_dev_to_id(iter->class_device); /* match */
        } while (iter->bus != NULL); /* need to try next rte_device */

        /* No more ethdev port to iterate. */
        rte_eth_iterator_cleanup(iter);
        return RTE_MAX_ETHPORTS;
}

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
        if (iter->bus_str == NULL)
                return; /* nothing to free in pure class filter */
        free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
        free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
        memset(iter, 0, sizeof(*iter));
}
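
/*
 * Usage sketch (illustrative): walk every ethdev port matching a devargs
 * string. This is the loop that the RTE_ETH_FOREACH_MATCHING_DEV() macro
 * in rte_ethdev.h wraps around the three functions above.
 *
 *      struct rte_dev_iterator iterator;
 *      uint16_t port_id;
 *
 *      if (rte_eth_iterator_init(&iterator,
 *                      "class=eth,mac=00:11:22:33:44:55") == 0) {
 *              for (port_id = rte_eth_iterator_next(&iterator);
 *                   port_id != RTE_MAX_ETHPORTS;
 *                   port_id = rte_eth_iterator_next(&iterator))
 *                      printf("matched port %u\n", port_id);
 *              (cleanup is performed internally once the iterator is exhausted)
 *      }
 */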

uint16_t
rte_eth_find_next(uint16_t port_id)
{
        while (port_id < RTE_MAX_ETHPORTS &&
                        rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
                port_id++;

        if (port_id >= RTE_MAX_ETHPORTS)
                return RTE_MAX_ETHPORTS;

        return port_id;
}

/*
 * Macro to iterate over all valid ports for internal usage.
 * Note: RTE_ETH_FOREACH_DEV is different because it filters out owned ports.
 */
#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
        for (port_id = rte_eth_find_next(0); \
             port_id < RTE_MAX_ETHPORTS; \
             port_id = rte_eth_find_next(port_id + 1))

uint16_t
rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
{
        port_id = rte_eth_find_next(port_id);
        while (port_id < RTE_MAX_ETHPORTS &&
                        rte_eth_devices[port_id].device != parent)
                port_id = rte_eth_find_next(port_id + 1);

        return port_id;
}

uint16_t
rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
        return rte_eth_find_next_of(port_id,
                        rte_eth_devices[ref_port_id].device);
}

static void
eth_dev_shared_data_prepare(void)
{
        const unsigned int flags = 0;
        const struct rte_memzone *mz;

        rte_spinlock_lock(&eth_dev_shared_data_lock);

        if (eth_dev_shared_data == NULL) {
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        /* Allocate port data and ownership shared memory. */
                        mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
                                        sizeof(*eth_dev_shared_data),
                                        rte_socket_id(), flags);
                } else
                        mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
                if (mz == NULL)
                        rte_panic("Cannot allocate ethdev shared data\n");

                eth_dev_shared_data = mz->addr;
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        eth_dev_shared_data->next_owner_id =
                                        RTE_ETH_DEV_NO_OWNER + 1;
                        rte_spinlock_init(&eth_dev_shared_data->ownership_lock);
                        memset(eth_dev_shared_data->data, 0,
                               sizeof(eth_dev_shared_data->data));
                }
        }

        rte_spinlock_unlock(&eth_dev_shared_data_lock);
}

static bool
eth_dev_is_allocated(const struct rte_eth_dev *ethdev)
{
        return ethdev->data->name[0] != '\0';
}

static struct rte_eth_dev *
eth_dev_allocated(const char *name)
{
        uint16_t i;

        RTE_BUILD_BUG_ON(RTE_MAX_ETHPORTS >= UINT16_MAX);

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (rte_eth_devices[i].data != NULL &&
                    strcmp(rte_eth_devices[i].data->name, name) == 0)
                        return &rte_eth_devices[i];
        }
        return NULL;
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
        struct rte_eth_dev *ethdev;

        eth_dev_shared_data_prepare();

        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        ethdev = eth_dev_allocated(name);

        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

        return ethdev;
}

static uint16_t
eth_dev_find_free_port(void)
{
        uint16_t i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                /* Using shared name field to find a free port. */
                if (eth_dev_shared_data->data[i].name[0] == '\0') {
                        RTE_ASSERT(rte_eth_devices[i].state ==
                                   RTE_ETH_DEV_UNUSED);
                        return i;
                }
        }
        return RTE_MAX_ETHPORTS;
}

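/*
 * Bind a port's data pointer to its slot in the shared memzone, so the
 * primary and any secondary process resolve the same rte_eth_dev_data
 * for a given port id.
 */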
static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
{
        struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

        eth_dev->data = &eth_dev_shared_data->data[port_id];

        return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
        uint16_t port_id;
        struct rte_eth_dev *eth_dev = NULL;
        size_t name_len;

        name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
        if (name_len == 0) {
                RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n");
                return NULL;
        }

        if (name_len >= RTE_ETH_NAME_MAX_LEN) {
                RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n");
                return NULL;
        }

        eth_dev_shared_data_prepare();

        /* Synchronize port creation between primary and secondary processes. */
        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        if (eth_dev_allocated(name) != NULL) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethernet device with name %s already allocated\n",
                        name);
                goto unlock;
        }

        port_id = eth_dev_find_free_port();
        if (port_id == RTE_MAX_ETHPORTS) {
                RTE_ETHDEV_LOG(ERR,
                        "Reached maximum number of Ethernet ports\n");
                goto unlock;
        }

        eth_dev = eth_dev_get(port_id);
        strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
        eth_dev->data->port_id = port_id;
        eth_dev->data->mtu = RTE_ETHER_MTU;
        pthread_mutex_init(&eth_dev->data->flow_ops_mutex, NULL);

unlock:
        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

        return eth_dev;
}

/*
 * Attach to a port already registered by the primary process, which
 * ensures that the same device has the same port id in both the
 * primary and the secondary process.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
        uint16_t i;
        struct rte_eth_dev *eth_dev = NULL;

        eth_dev_shared_data_prepare();

        /* Synchronize port attachment to primary port creation and release. */
        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (strcmp(eth_dev_shared_data->data[i].name, name) == 0)
                        break;
        }
        if (i == RTE_MAX_ETHPORTS) {
                RTE_ETHDEV_LOG(ERR,
                        "Device %s is not driven by the primary process\n",
                        name);
        } else {
                eth_dev = eth_dev_get(i);
                RTE_ASSERT(eth_dev->data->port_id == i);
        }

        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
        return eth_dev;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
        if (eth_dev == NULL)
                return -EINVAL;

        eth_dev_shared_data_prepare();

        if (eth_dev->state != RTE_ETH_DEV_UNUSED)
                rte_eth_dev_callback_process(eth_dev,
                                RTE_ETH_EVENT_DESTROY, NULL);

        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        eth_dev->state = RTE_ETH_DEV_UNUSED;
        eth_dev->device = NULL;
        eth_dev->process_private = NULL;
        eth_dev->intr_handle = NULL;
        eth_dev->rx_pkt_burst = NULL;
        eth_dev->tx_pkt_burst = NULL;
        eth_dev->tx_pkt_prepare = NULL;
        eth_dev->rx_queue_count = NULL;
        eth_dev->rx_descriptor_done = NULL;
        eth_dev->rx_descriptor_status = NULL;
        eth_dev->tx_descriptor_status = NULL;
        eth_dev->dev_ops = NULL;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                rte_free(eth_dev->data->rx_queues);
                rte_free(eth_dev->data->tx_queues);
                rte_free(eth_dev->data->mac_addrs);
                rte_free(eth_dev->data->hash_mac_addrs);
                rte_free(eth_dev->data->dev_private);
                pthread_mutex_destroy(&eth_dev->data->flow_ops_mutex);
                memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
        }

        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

        return 0;
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
        if (port_id >= RTE_MAX_ETHPORTS ||
            rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
                return 0;
        else
                return 1;
}

static int
eth_is_valid_owner_id(uint64_t owner_id)
{
        if (owner_id == RTE_ETH_DEV_NO_OWNER ||
            eth_dev_shared_data->next_owner_id <= owner_id)
                return 0;
        return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
        port_id = rte_eth_find_next(port_id);
        while (port_id < RTE_MAX_ETHPORTS &&
                        rte_eth_devices[port_id].data->owner.id != owner_id)
                port_id = rte_eth_find_next(port_id + 1);

        return port_id;
}

int
rte_eth_dev_owner_new(uint64_t *owner_id)
{
        eth_dev_shared_data_prepare();

        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        *owner_id = eth_dev_shared_data->next_owner_id++;

        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
        return 0;
}

static int
eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
                       const struct rte_eth_dev_owner *new_owner)
{
        struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
        struct rte_eth_dev_owner *port_owner;

        if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
                RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
                        port_id);
                return -ENODEV;
        }

        if (!eth_is_valid_owner_id(new_owner->id) &&
            !eth_is_valid_owner_id(old_owner_id)) {
                RTE_ETHDEV_LOG(ERR,
                        "Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
                       old_owner_id, new_owner->id);
                return -EINVAL;
        }

        port_owner = &rte_eth_devices[port_id].data->owner;
        if (port_owner->id != old_owner_id) {
                RTE_ETHDEV_LOG(ERR,
                        "Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
                        port_id, port_owner->name, port_owner->id);
                return -EPERM;
        }

        /* cannot truncate (same structure) */
        strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);

        port_owner->id = new_owner->id;

        RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
                port_id, new_owner->name, new_owner->id);

        return 0;
}

int
rte_eth_dev_owner_set(const uint16_t port_id,
                      const struct rte_eth_dev_owner *owner)
{
        int ret;

        eth_dev_shared_data_prepare();

        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
        return ret;
}

int
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
        const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
                        {.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
        int ret;

        eth_dev_shared_data_prepare();

        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        ret = eth_dev_owner_set(port_id, owner_id, &new_owner);

        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
        return ret;
}

int
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
        uint16_t port_id;
        int ret = 0;

        eth_dev_shared_data_prepare();

        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        if (eth_is_valid_owner_id(owner_id)) {
                for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
                        if (rte_eth_devices[port_id].data->owner.id == owner_id)
                                memset(&rte_eth_devices[port_id].data->owner, 0,
                                       sizeof(struct rte_eth_dev_owner));
                RTE_ETHDEV_LOG(NOTICE,
                        "All port owners owned by %016"PRIx64" identifier have been removed\n",
                        owner_id);
        } else {
                RTE_ETHDEV_LOG(ERR,
                               "Invalid owner id=%016"PRIx64"\n",
                               owner_id);
                ret = -EINVAL;
        }

        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

        return ret;
}

int
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
        int ret = 0;
        struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];

        eth_dev_shared_data_prepare();

        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
                RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
                        port_id);
                ret = -ENODEV;
        } else {
                rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
        }

        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
        return ret;
}
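
/*
 * Usage sketch (illustrative): take exclusive ownership of a port so other
 * components skip it in RTE_ETH_FOREACH_DEV(). Error handling elided; the
 * "my_app" name is a placeholder.
 *
 *      struct rte_eth_dev_owner owner = { .name = "my_app" };
 *
 *      rte_eth_dev_owner_new(&owner.id);
 *      rte_eth_dev_owner_set(port_id, &owner);
 *      ...
 *      rte_eth_dev_owner_unset(port_id, owner.id);
 */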

int
rte_eth_dev_socket_id(uint16_t port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
        return rte_eth_devices[port_id].data->numa_node;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
        return rte_eth_devices[port_id].security_ctx;
}

uint16_t
rte_eth_dev_count_avail(void)
{
        uint16_t p;
        uint16_t count;

        count = 0;

        RTE_ETH_FOREACH_DEV(p)
                count++;

        return count;
}

uint16_t
rte_eth_dev_count_total(void)
{
        uint16_t port, count = 0;

        RTE_ETH_FOREACH_VALID_DEV(port)
                count++;

        return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
        char *tmp;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        if (name == NULL) {
                RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
                return -EINVAL;
        }

        /* We should not check 'rte_eth_devices[i].data' here,
         * because it might be overwritten by a VDEV PMD. */
        tmp = eth_dev_shared_data->data[port_id].name;
        strcpy(name, tmp);
        return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
        uint16_t pid;

        if (name == NULL) {
                RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
                return -EINVAL;
        }

        RTE_ETH_FOREACH_VALID_DEV(pid)
                if (!strcmp(name, eth_dev_shared_data->data[pid].name)) {
                        *port_id = pid;
                        return 0;
                }

        return -ENODEV;
}

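/*
 * Normalize a driver return code: if the device was physically removed
 * while the operation was in flight, report -EIO so that callers can tell
 * hot-unplug apart from an ordinary driver failure.
 */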
static int
eth_err(uint16_t port_id, int ret)
{
        if (ret == 0)
                return 0;
        if (rte_eth_dev_is_removed(port_id))
                return -EIO;
        return ret;
}

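/*
 * Resize the array of Rx queue pointers to nb_queues: allocate it on first
 * configuration, release queues beyond the new count and zero the newly
 * added slots on reconfiguration, and free the array when nb_queues is 0.
 */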
static int
eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_rx_queues;
        void **rxq;
        unsigned int i;

        if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
                                sizeof(dev->data->rx_queues[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (dev->data->rx_queues == NULL) {
                        dev->data->nb_rx_queues = 0;
                        return -ENOMEM;
                }
        } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);
                rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (rxq == NULL)
                        return -ENOMEM;
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(rxq + old_nb_queues, 0,
                                sizeof(rxq[0]) * new_qs);
                }

                dev->data->rx_queues = rxq;

        } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);

                rte_free(dev->data->rx_queues);
                dev->data->rx_queues = NULL;
        }
        dev->data->nb_rx_queues = nb_queues;
        return 0;
}

static int
eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        uint16_t port_id;

        if (rx_queue_id >= dev->data->nb_rx_queues) {
                port_id = dev->data->port_id;
                RTE_ETHDEV_LOG(ERR,
                               "Invalid Rx queue_id=%u of device with port_id=%u\n",
                               rx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->rx_queues[rx_queue_id] == NULL) {
                port_id = dev->data->port_id;
                RTE_ETHDEV_LOG(ERR,
                               "Queue %u of device with port_id=%u has not been setup\n",
                               rx_queue_id, port_id);
                return -EINVAL;
        }

        return 0;
}

static int
eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
        uint16_t port_id;

        if (tx_queue_id >= dev->data->nb_tx_queues) {
                port_id = dev->data->port_id;
                RTE_ETHDEV_LOG(ERR,
                               "Invalid Tx queue_id=%u of device with port_id=%u\n",
                               tx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->tx_queues[tx_queue_id] == NULL) {
                port_id = dev->data->port_id;
                RTE_ETHDEV_LOG(ERR,
                               "Queue %u of device with port_id=%u has not been setup\n",
                               tx_queue_id, port_id);
                return -EINVAL;
        }

        return 0;
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        if (!dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be started before starting any queue\n",
                        port_id);
                return -EINVAL;
        }

        ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
        if (ret != 0)
                return ret;

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

        if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        rx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->rx_queue_start(dev,
                                                             rx_queue_id));
}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];

        ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
        if (ret != 0)
                return ret;

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

        if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        rx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
}

int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        if (!dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be started before starting any queue\n",
                        port_id);
                return -EINVAL;
        }

        ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
        if (ret != 0)
                return ret;

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

        if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        tx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];

        ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
        if (ret != 0)
                return ret;

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

        if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        tx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
}

static int
eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_tx_queues;
        void **txq;
        unsigned int i;

        if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
                                                   sizeof(dev->data->tx_queues[0]) * nb_queues,
                                                   RTE_CACHE_LINE_SIZE);
                if (dev->data->tx_queues == NULL) {
                        dev->data->nb_tx_queues = 0;
                        return -ENOMEM;
                }
        } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);
                txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
                                  RTE_CACHE_LINE_SIZE);
                if (txq == NULL)
                        return -ENOMEM;
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(txq + old_nb_queues, 0,
                               sizeof(txq[0]) * new_qs);
                }

                dev->data->tx_queues = txq;

        } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);

                rte_free(dev->data->tx_queues);
                dev->data->tx_queues = NULL;
        }
        dev->data->nb_tx_queues = nb_queues;
        return 0;
}

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
        switch (speed) {
        case ETH_SPEED_NUM_10M:
                return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
        case ETH_SPEED_NUM_100M:
                return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
        case ETH_SPEED_NUM_1G:
                return ETH_LINK_SPEED_1G;
        case ETH_SPEED_NUM_2_5G:
                return ETH_LINK_SPEED_2_5G;
        case ETH_SPEED_NUM_5G:
                return ETH_LINK_SPEED_5G;
        case ETH_SPEED_NUM_10G:
                return ETH_LINK_SPEED_10G;
        case ETH_SPEED_NUM_20G:
                return ETH_LINK_SPEED_20G;
        case ETH_SPEED_NUM_25G:
                return ETH_LINK_SPEED_25G;
        case ETH_SPEED_NUM_40G:
                return ETH_LINK_SPEED_40G;
        case ETH_SPEED_NUM_50G:
                return ETH_LINK_SPEED_50G;
        case ETH_SPEED_NUM_56G:
                return ETH_LINK_SPEED_56G;
        case ETH_SPEED_NUM_100G:
                return ETH_LINK_SPEED_100G;
        case ETH_SPEED_NUM_200G:
                return ETH_LINK_SPEED_200G;
        default:
                return 0;
        }
}
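
/*
 * Usage sketch (illustrative): request a fixed 10G full-duplex link by
 * combining the bitflag with ETH_LINK_SPEED_FIXED in the port config.
 *
 *      struct rte_eth_conf conf = { 0 };
 *
 *      conf.link_speeds = ETH_LINK_SPEED_FIXED |
 *              rte_eth_speed_bitflag(ETH_SPEED_NUM_10G, ETH_LINK_FULL_DUPLEX);
 */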

const char *
rte_eth_dev_rx_offload_name(uint64_t offload)
{
        const char *name = "UNKNOWN";
        unsigned int i;

        for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i) {
                if (offload == eth_dev_rx_offload_names[i].offload) {
                        name = eth_dev_rx_offload_names[i].name;
                        break;
                }
        }

        return name;
}

const char *
rte_eth_dev_tx_offload_name(uint64_t offload)
{
        const char *name = "UNKNOWN";
        unsigned int i;

        for (i = 0; i < RTE_DIM(eth_dev_tx_offload_names); ++i) {
                if (offload == eth_dev_tx_offload_names[i].offload) {
                        name = eth_dev_tx_offload_names[i].name;
                        break;
                }
        }

        return name;
}

static inline int
eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
                   uint32_t max_rx_pkt_len, uint32_t dev_info_size)
{
        int ret = 0;

        if (dev_info_size == 0) {
                if (config_size != max_rx_pkt_len) {
                        RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
                                       " %u != %u is not allowed\n",
                                       port_id, config_size, max_rx_pkt_len);
                        ret = -EINVAL;
                }
        } else if (config_size > dev_info_size) {
                RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
                               "> max allowed value %u\n", port_id, config_size,
                               dev_info_size);
                ret = -EINVAL;
        } else if (config_size < RTE_ETHER_MIN_LEN) {
                RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
                               "< min allowed value %u\n", port_id, config_size,
                               (unsigned int)RTE_ETHER_MIN_LEN);
                ret = -EINVAL;
        }
        return ret;
}

/*
 * Validate offloads that are requested through rte_eth_dev_configure against
 * the offloads successfully set by the ethernet device.
 *
 * @param port_id
 *   The port identifier of the Ethernet device.
 * @param req_offloads
 *   The offloads that have been requested through `rte_eth_dev_configure`.
 * @param set_offloads
 *   The offloads successfully set by the ethernet device.
 * @param offload_type
 *   The offload type i.e. Rx/Tx string.
 * @param offload_name
 *   The function that prints the offload name.
 * @return
 *   - (0) if validation successful.
 *   - (-EINVAL) if requested offload has been silently disabled.
 */
static int
eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads,
                  uint64_t set_offloads, const char *offload_type,
                  const char *(*offload_name)(uint64_t))
{
        uint64_t offloads_diff = req_offloads ^ set_offloads;
        uint64_t offload;
        int ret = 0;

        while (offloads_diff != 0) {
                /* Check if any offload is requested but not enabled. */
                offload = 1ULL << __builtin_ctzll(offloads_diff);
                if (offload & req_offloads) {
                        RTE_ETHDEV_LOG(ERR,
                                "Port %u failed to enable %s offload %s\n",
                                port_id, offload_type, offload_name(offload));
                        ret = -EINVAL;
                }

                /* Check if offload couldn't be disabled. */
                if (offload & set_offloads) {
                        RTE_ETHDEV_LOG(DEBUG,
                                "Port %u %s offload %s is not requested but enabled\n",
                                port_id, offload_type, offload_name(offload));
                }

                offloads_diff &= ~offload;
        }

        return ret;
}
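
/*
 * Worked example: with req_offloads = VLAN_STRIP | TCP_LRO and
 * set_offloads = VLAN_STRIP, the XOR diff is TCP_LRO. That bit is set in
 * req_offloads, so a requested offload was silently dropped and the
 * function returns -EINVAL. A bit set only in set_offloads (enabled but
 * never requested) is merely logged at DEBUG level.
 */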

int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                      const struct rte_eth_conf *dev_conf)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        struct rte_eth_conf orig_conf;
        uint16_t overhead_len;
        int diag;
        int ret;
        uint16_t old_mtu;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

        if (dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be stopped to allow configuration\n",
                        port_id);
                return -EBUSY;
        }

        /* Store original config, as rollback required on failure */
        memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));

        /*
         * Copy the dev_conf parameter into the dev structure.
         * rte_eth_dev_info_get() requires dev_conf; copy it in before querying dev_info.
         */
        if (dev_conf != &dev->data->dev_conf)
                memcpy(&dev->data->dev_conf, dev_conf,
                       sizeof(dev->data->dev_conf));

        /* Backup mtu for rollback */
        old_mtu = dev->data->mtu;

        ret = rte_eth_dev_info_get(port_id, &dev_info);
        if (ret != 0)
                goto rollback;

        /* Get the real Ethernet overhead length */
        if (dev_info.max_mtu != UINT16_MAX &&
            dev_info.max_rx_pktlen > dev_info.max_mtu)
                overhead_len = dev_info.max_rx_pktlen - dev_info.max_mtu;
        else
                overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

        /* If number of queues specified by application for both Rx and Tx is
         * zero, use driver preferred values. This cannot be done individually
         * as it is valid for either Tx or Rx (but not both) to be zero.
         * If the driver does not provide any preferred values, fall back on
         * EAL defaults.
         */
        if (nb_rx_q == 0 && nb_tx_q == 0) {
                nb_rx_q = dev_info.default_rxportconf.nb_queues;
                if (nb_rx_q == 0)
                        nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
                nb_tx_q = dev_info.default_txportconf.nb_queues;
                if (nb_tx_q == 0)
                        nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
        }

        if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
                RTE_ETHDEV_LOG(ERR,
                        "Number of RX queues requested (%u) is greater than max supported (%d)\n",
                        nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
                ret = -EINVAL;
                goto rollback;
        }

        if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
                RTE_ETHDEV_LOG(ERR,
                        "Number of TX queues requested (%u) is greater than max supported (%d)\n",
                        nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
                ret = -EINVAL;
                goto rollback;
        }

        /*
         * Check that the numbers of RX and TX queues are not greater
         * than the maximum number of RX and TX queues supported by the
         * configured device.
         */
        if (nb_rx_q > dev_info.max_rx_queues) {
                RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
                        port_id, nb_rx_q, dev_info.max_rx_queues);
                ret = -EINVAL;
                goto rollback;
        }

        if (nb_tx_q > dev_info.max_tx_queues) {
                RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
                        port_id, nb_tx_q, dev_info.max_tx_queues);
                ret = -EINVAL;
                goto rollback;
        }

        /* Check that the device supports requested interrupts */
        if ((dev_conf->intr_conf.lsc == 1) &&
                        (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
                RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
                        dev->device->driver->name);
                ret = -EINVAL;
                goto rollback;
        }
        if ((dev_conf->intr_conf.rmv == 1) &&
                        (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
                RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
                        dev->device->driver->name);
                ret = -EINVAL;
                goto rollback;
        }

        /*
         * If jumbo frames are enabled, check that the maximum RX packet
         * length is supported by the configured device.
         */
        if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
                if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) {
                        RTE_ETHDEV_LOG(ERR,
                                "Ethdev port_id=%u max_rx_pkt_len %u > max valid value %u\n",
                                port_id, dev_conf->rxmode.max_rx_pkt_len,
                                dev_info.max_rx_pktlen);
                        ret = -EINVAL;
                        goto rollback;
                } else if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN) {
                        RTE_ETHDEV_LOG(ERR,
                                "Ethdev port_id=%u max_rx_pkt_len %u < min valid value %u\n",
                                port_id, dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned int)RTE_ETHER_MIN_LEN);
                        ret = -EINVAL;
                        goto rollback;
                }
                /* Derive the MTU from max_rx_pkt_len minus the Ethernet overhead */
1422                 dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
1423                                 overhead_len;
1424         } else {
1425                 uint16_t pktlen = dev_conf->rxmode.max_rx_pkt_len;
1426                 if (pktlen < RTE_ETHER_MIN_MTU + overhead_len ||
1427                     pktlen > RTE_ETHER_MTU + overhead_len)
1428                         /* Use default value */
1429                         dev->data->dev_conf.rxmode.max_rx_pkt_len =
1430                                                 RTE_ETHER_MTU + overhead_len;
1431         }
1432
1433         /*
1434          * If LRO is enabled, check that the maximum aggregated packet
1435          * size is supported by the configured device.
1436          */
1437         if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
1438                 if (dev_conf->rxmode.max_lro_pkt_size == 0)
1439                         dev->data->dev_conf.rxmode.max_lro_pkt_size =
1440                                 dev->data->dev_conf.rxmode.max_rx_pkt_len;
1441                 ret = eth_dev_check_lro_pkt_size(port_id,
1442                                 dev->data->dev_conf.rxmode.max_lro_pkt_size,
1443                                 dev->data->dev_conf.rxmode.max_rx_pkt_len,
1444                                 dev_info.max_lro_pkt_size);
1445                 if (ret != 0)
1446                         goto rollback;
1447         }
1448
1449         /* Any requested offloading must be within its device capabilities */
1450         if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
1451              dev_conf->rxmode.offloads) {
1452                 RTE_ETHDEV_LOG(ERR,
1453                         "Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
1454                         "capabilities 0x%"PRIx64" in %s()\n",
1455                         port_id, dev_conf->rxmode.offloads,
1456                         dev_info.rx_offload_capa,
1457                         __func__);
1458                 ret = -EINVAL;
1459                 goto rollback;
1460         }
1461         if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
1462              dev_conf->txmode.offloads) {
1463                 RTE_ETHDEV_LOG(ERR,
1464                         "Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
1465                         "capabilities 0x%"PRIx64" in %s()\n",
1466                         port_id, dev_conf->txmode.offloads,
1467                         dev_info.tx_offload_capa,
1468                         __func__);
1469                 ret = -EINVAL;
1470                 goto rollback;
1471         }
1472
1473         dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
1474                 rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);
1475
1476         /* Check that device supports requested rss hash functions. */
1477         if ((dev_info.flow_type_rss_offloads |
1478              dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
1479             dev_info.flow_type_rss_offloads) {
1480                 RTE_ETHDEV_LOG(ERR,
1481                         "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
1482                         port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
1483                         dev_info.flow_type_rss_offloads);
1484                 ret = -EINVAL;
1485                 goto rollback;
1486         }
1487
1488         /* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
1489         if (((dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) == 0) &&
1490             (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethdev port_id=%u: %s offload requested but Rx mq_mode does not enable RSS\n",
                        port_id,
                        rte_eth_dev_rx_offload_name(DEV_RX_OFFLOAD_RSS_HASH));
1495                 ret = -EINVAL;
1496                 goto rollback;
1497         }
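
        /*
         * Illustrative sketch (editor's addition): the checks above can be
         * satisfied by masking the requested offloads and RSS hash types
         * with the capabilities reported by rte_eth_dev_info_get() and by
         * selecting an RSS multi-queue mode:
         *
         *	struct rte_eth_dev_info info;
         *	struct rte_eth_conf conf = {0};
         *
         *	rte_eth_dev_info_get(port_id, &info);
         *	conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
         *	conf.rx_adv_conf.rss_conf.rss_hf =
         *		ETH_RSS_IP & info.flow_type_rss_offloads;
         *	conf.rxmode.offloads =
         *		DEV_RX_OFFLOAD_RSS_HASH & info.rx_offload_capa;
         */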
1498
        /*
         * Set up the new number of Rx/Tx queues and reconfigure the device.
         */
1502         diag = eth_dev_rx_queue_config(dev, nb_rx_q);
1503         if (diag != 0) {
1504                 RTE_ETHDEV_LOG(ERR,
1505                         "Port%u eth_dev_rx_queue_config = %d\n",
1506                         port_id, diag);
1507                 ret = diag;
1508                 goto rollback;
1509         }
1510
1511         diag = eth_dev_tx_queue_config(dev, nb_tx_q);
1512         if (diag != 0) {
1513                 RTE_ETHDEV_LOG(ERR,
1514                         "Port%u eth_dev_tx_queue_config = %d\n",
1515                         port_id, diag);
1516                 eth_dev_rx_queue_config(dev, 0);
1517                 ret = diag;
1518                 goto rollback;
1519         }
1520
1521         diag = (*dev->dev_ops->dev_configure)(dev);
1522         if (diag != 0) {
1523                 RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
1524                         port_id, diag);
1525                 ret = eth_err(port_id, diag);
1526                 goto reset_queues;
1527         }
1528
1529         /* Initialize Rx profiling if enabled at compilation time. */
1530         diag = __rte_eth_dev_profile_init(port_id, dev);
1531         if (diag != 0) {
1532                 RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
1533                         port_id, diag);
1534                 ret = eth_err(port_id, diag);
1535                 goto reset_queues;
1536         }
1537
1538         /* Validate Rx offloads. */
1539         diag = eth_dev_validate_offloads(port_id,
1540                         dev_conf->rxmode.offloads,
1541                         dev->data->dev_conf.rxmode.offloads, "Rx",
1542                         rte_eth_dev_rx_offload_name);
1543         if (diag != 0) {
1544                 ret = diag;
1545                 goto reset_queues;
1546         }
1547
1548         /* Validate Tx offloads. */
1549         diag = eth_dev_validate_offloads(port_id,
1550                         dev_conf->txmode.offloads,
1551                         dev->data->dev_conf.txmode.offloads, "Tx",
1552                         rte_eth_dev_tx_offload_name);
1553         if (diag != 0) {
1554                 ret = diag;
1555                 goto reset_queues;
1556         }
1557
1558         rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0);
1559         return 0;
1560 reset_queues:
1561         eth_dev_rx_queue_config(dev, 0);
1562         eth_dev_tx_queue_config(dev, 0);
1563 rollback:
1564         memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
1565         if (old_mtu != dev->data->mtu)
1566                 dev->data->mtu = old_mtu;
1567
1568         rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret);
1569         return ret;
1570 }
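
/*
 * Illustrative sketch (editor's addition): typical use of the function
 * above; nb_rxq and nb_txq are application-chosen queue counts. On failure
 * the previous configuration and MTU are rolled back, so the call can
 * simply be retried with different parameters.
 *
 *	struct rte_eth_conf conf = {0};
 *
 *	if (rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf) != 0)
 *		rte_exit(EXIT_FAILURE, "Cannot configure port %u\n", port_id);
 */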
1571
1572 void
1573 rte_eth_dev_internal_reset(struct rte_eth_dev *dev)
1574 {
1575         if (dev->data->dev_started) {
1576                 RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
1577                         dev->data->port_id);
1578                 return;
1579         }
1580
1581         eth_dev_rx_queue_config(dev, 0);
1582         eth_dev_tx_queue_config(dev, 0);
1583
1584         memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
1585 }
1586
1587 static void
1588 eth_dev_mac_restore(struct rte_eth_dev *dev,
1589                         struct rte_eth_dev_info *dev_info)
1590 {
1591         struct rte_ether_addr *addr;
1592         uint16_t i;
1593         uint32_t pool = 0;
1594         uint64_t pool_mask;
1595
1596         /* replay MAC address configuration including default MAC */
1597         addr = &dev->data->mac_addrs[0];
1598         if (*dev->dev_ops->mac_addr_set != NULL)
1599                 (*dev->dev_ops->mac_addr_set)(dev, addr);
1600         else if (*dev->dev_ops->mac_addr_add != NULL)
1601                 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);
1602
1603         if (*dev->dev_ops->mac_addr_add != NULL) {
1604                 for (i = 1; i < dev_info->max_mac_addrs; i++) {
1605                         addr = &dev->data->mac_addrs[i];
1606
1607                         /* skip zero address */
1608                         if (rte_is_zero_ether_addr(addr))
1609                                 continue;
1610
1611                         pool = 0;
1612                         pool_mask = dev->data->mac_pool_sel[i];
1613
1614                         do {
1615                                 if (pool_mask & 1ULL)
1616                                         (*dev->dev_ops->mac_addr_add)(dev,
1617                                                 addr, i, pool);
1618                                 pool_mask >>= 1;
1619                                 pool++;
1620                         } while (pool_mask);
1621                 }
1622         }
1623 }
1624
1625 static int
1626 eth_dev_config_restore(struct rte_eth_dev *dev,
1627                 struct rte_eth_dev_info *dev_info, uint16_t port_id)
1628 {
1629         int ret;
1630
1631         if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
1632                 eth_dev_mac_restore(dev, dev_info);
1633
1634         /* replay promiscuous configuration */
        /*
         * Use the driver callbacks directly: the port_id check is not
         * needed here, and the public API's same-value short-circuit
         * must be bypassed during the replay.
         */
1639         if (rte_eth_promiscuous_get(port_id) == 1 &&
1640             *dev->dev_ops->promiscuous_enable != NULL) {
1641                 ret = eth_err(port_id,
1642                               (*dev->dev_ops->promiscuous_enable)(dev));
1643                 if (ret != 0 && ret != -ENOTSUP) {
1644                         RTE_ETHDEV_LOG(ERR,
1645                                 "Failed to enable promiscuous mode for device (port %u): %s\n",
1646                                 port_id, rte_strerror(-ret));
1647                         return ret;
1648                 }
1649         } else if (rte_eth_promiscuous_get(port_id) == 0 &&
1650                    *dev->dev_ops->promiscuous_disable != NULL) {
1651                 ret = eth_err(port_id,
1652                               (*dev->dev_ops->promiscuous_disable)(dev));
1653                 if (ret != 0 && ret != -ENOTSUP) {
1654                         RTE_ETHDEV_LOG(ERR,
1655                                 "Failed to disable promiscuous mode for device (port %u): %s\n",
1656                                 port_id, rte_strerror(-ret));
1657                         return ret;
1658                 }
1659         }
1660
1661         /* replay all multicast configuration */
        /*
         * Use the driver callbacks directly: the port_id check is not
         * needed here, and the public API's same-value short-circuit
         * must be bypassed during the replay.
         */
1666         if (rte_eth_allmulticast_get(port_id) == 1 &&
1667             *dev->dev_ops->allmulticast_enable != NULL) {
1668                 ret = eth_err(port_id,
1669                               (*dev->dev_ops->allmulticast_enable)(dev));
1670                 if (ret != 0 && ret != -ENOTSUP) {
1671                         RTE_ETHDEV_LOG(ERR,
1672                                 "Failed to enable allmulticast mode for device (port %u): %s\n",
1673                                 port_id, rte_strerror(-ret));
1674                         return ret;
1675                 }
1676         } else if (rte_eth_allmulticast_get(port_id) == 0 &&
1677                    *dev->dev_ops->allmulticast_disable != NULL) {
1678                 ret = eth_err(port_id,
1679                               (*dev->dev_ops->allmulticast_disable)(dev));
1680                 if (ret != 0 && ret != -ENOTSUP) {
1681                         RTE_ETHDEV_LOG(ERR,
1682                                 "Failed to disable allmulticast mode for device (port %u): %s\n",
1683                                 port_id, rte_strerror(-ret));
1684                         return ret;
1685                 }
1686         }
1687
1688         return 0;
1689 }
1690
1691 int
1692 rte_eth_dev_start(uint16_t port_id)
1693 {
1694         struct rte_eth_dev *dev;
1695         struct rte_eth_dev_info dev_info;
1696         int diag;
1697         int ret, ret_stop;
1698
1699         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1700
1701         dev = &rte_eth_devices[port_id];
1702
1703         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1704
1705         if (dev->data->dev_started != 0) {
1706                 RTE_ETHDEV_LOG(INFO,
1707                         "Device with port_id=%"PRIu16" already started\n",
1708                         port_id);
1709                 return 0;
1710         }
1711
1712         ret = rte_eth_dev_info_get(port_id, &dev_info);
1713         if (ret != 0)
1714                 return ret;
1715
        /* Restore the MAC address now if the device does not support live change */
1717         if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
1718                 eth_dev_mac_restore(dev, &dev_info);
1719
1720         diag = (*dev->dev_ops->dev_start)(dev);
1721         if (diag == 0)
1722                 dev->data->dev_started = 1;
1723         else
1724                 return eth_err(port_id, diag);
1725
1726         ret = eth_dev_config_restore(dev, &dev_info, port_id);
1727         if (ret != 0) {
1728                 RTE_ETHDEV_LOG(ERR,
1729                         "Error during restoring configuration for device (port %u): %s\n",
1730                         port_id, rte_strerror(-ret));
1731                 ret_stop = rte_eth_dev_stop(port_id);
1732                 if (ret_stop != 0) {
1733                         RTE_ETHDEV_LOG(ERR,
1734                                 "Failed to stop device (port %u): %s\n",
1735                                 port_id, rte_strerror(-ret_stop));
1736                 }
1737
1738                 return ret;
1739         }
1740
1741         if (dev->data->dev_conf.intr_conf.lsc == 0) {
1742                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
1743                 (*dev->dev_ops->link_update)(dev, 0);
1744         }
1745
1746         rte_ethdev_trace_start(port_id);
1747         return 0;
1748 }
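
/*
 * Illustrative sketch (editor's addition): starting a configured port and
 * pairing it with the stop call defined below; both return 0 if the port
 * is already in the requested state.
 *
 *	if (rte_eth_dev_start(port_id) != 0)
 *		rte_exit(EXIT_FAILURE, "Cannot start port %u\n", port_id);
 *	...
 *	ret = rte_eth_dev_stop(port_id);
 */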
1749
1750 int
1751 rte_eth_dev_stop(uint16_t port_id)
1752 {
1753         struct rte_eth_dev *dev;
1754         int ret;
1755
1756         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1757         dev = &rte_eth_devices[port_id];
1758
1759         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_stop, -ENOTSUP);
1760
1761         if (dev->data->dev_started == 0) {
1762                 RTE_ETHDEV_LOG(INFO,
1763                         "Device with port_id=%"PRIu16" already stopped\n",
1764                         port_id);
1765                 return 0;
1766         }
1767
1768         dev->data->dev_started = 0;
1769         ret = (*dev->dev_ops->dev_stop)(dev);
1770         rte_ethdev_trace_stop(port_id, ret);
1771
1772         return ret;
1773 }
1774
1775 int
1776 rte_eth_dev_set_link_up(uint16_t port_id)
1777 {
1778         struct rte_eth_dev *dev;
1779
1780         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1781
1782         dev = &rte_eth_devices[port_id];
1783
1784         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1785         return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
1786 }
1787
1788 int
1789 rte_eth_dev_set_link_down(uint16_t port_id)
1790 {
1791         struct rte_eth_dev *dev;
1792
1793         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1794
1795         dev = &rte_eth_devices[port_id];
1796
1797         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1798         return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
1799 }
1800
1801 int
1802 rte_eth_dev_close(uint16_t port_id)
1803 {
1804         struct rte_eth_dev *dev;
1805         int firsterr, binerr;
1806         int *lasterr = &firsterr;
1807
1808         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1809         dev = &rte_eth_devices[port_id];
1810
1811         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
1812         *lasterr = (*dev->dev_ops->dev_close)(dev);
1813         if (*lasterr != 0)
1814                 lasterr = &binerr;
1815
1816         rte_ethdev_trace_close(port_id);
1817         *lasterr = rte_eth_dev_release_port(dev);
1818
1819         return firsterr;
1820 }
1821
1822 int
1823 rte_eth_dev_reset(uint16_t port_id)
1824 {
1825         struct rte_eth_dev *dev;
1826         int ret;
1827
1828         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1829         dev = &rte_eth_devices[port_id];
1830
1831         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
1832
1833         ret = rte_eth_dev_stop(port_id);
1834         if (ret != 0) {
1835                 RTE_ETHDEV_LOG(ERR,
                        "Failed to stop device (port %u) before reset: %s - ignoring\n",
1837                         port_id, rte_strerror(-ret));
1838         }
1839         ret = dev->dev_ops->dev_reset(dev);
1840
1841         return eth_err(port_id, ret);
1842 }
1843
1844 int
1845 rte_eth_dev_is_removed(uint16_t port_id)
1846 {
1847         struct rte_eth_dev *dev;
1848         int ret;
1849
1850         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
1851
1852         dev = &rte_eth_devices[port_id];
1853
1854         if (dev->state == RTE_ETH_DEV_REMOVED)
1855                 return 1;
1856
1857         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);
1858
1859         ret = dev->dev_ops->is_removed(dev);
1860         if (ret != 0)
1861                 /* Device is physically removed. */
1862                 dev->state = RTE_ETH_DEV_REMOVED;
1863
1864         return ret;
1865 }
1866
1867 static int
1868 rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg,
1869                              uint16_t n_seg, uint32_t *mbp_buf_size,
1870                              const struct rte_eth_dev_info *dev_info)
1871 {
1872         const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa;
1873         struct rte_mempool *mp_first;
1874         uint32_t offset_mask;
1875         uint16_t seg_idx;
1876
1877         if (n_seg > seg_capa->max_nseg) {
1878                 RTE_ETHDEV_LOG(ERR,
1879                                "Requested Rx segments %u exceed supported %u\n",
1880                                n_seg, seg_capa->max_nseg);
1881                 return -EINVAL;
1882         }
1883         /*
1884          * Check the sizes and offsets against buffer sizes
1885          * for each segment specified in extended configuration.
1886          */
1887         mp_first = rx_seg[0].mp;
1888         offset_mask = (1u << seg_capa->offset_align_log2) - 1;
1889         for (seg_idx = 0; seg_idx < n_seg; seg_idx++) {
1890                 struct rte_mempool *mpl = rx_seg[seg_idx].mp;
1891                 uint32_t length = rx_seg[seg_idx].length;
1892                 uint32_t offset = rx_seg[seg_idx].offset;
1893
1894                 if (mpl == NULL) {
1895                         RTE_ETHDEV_LOG(ERR, "null mempool pointer\n");
1896                         return -EINVAL;
1897                 }
1898                 if (seg_idx != 0 && mp_first != mpl &&
1899                     seg_capa->multi_pools == 0) {
1900                         RTE_ETHDEV_LOG(ERR, "Receiving to multiple pools is not supported\n");
1901                         return -ENOTSUP;
1902                 }
1903                 if (offset != 0) {
1904                         if (seg_capa->offset_allowed == 0) {
1905                                 RTE_ETHDEV_LOG(ERR, "Rx segmentation with offset is not supported\n");
1906                                 return -ENOTSUP;
1907                         }
1908                         if (offset & offset_mask) {
                                RTE_ETHDEV_LOG(ERR, "Rx segment offset %u is not aligned to 2^%u bytes\n",
                                               offset,
                                               seg_capa->offset_align_log2);
1912                                 return -EINVAL;
1913                         }
1914                 }
1915                 if (mpl->private_data_size <
1916                         sizeof(struct rte_pktmbuf_pool_private)) {
1917                         RTE_ETHDEV_LOG(ERR,
1918                                        "%s private_data_size %u < %u\n",
1919                                        mpl->name, mpl->private_data_size,
1920                                        (unsigned int)sizeof
1921                                         (struct rte_pktmbuf_pool_private));
1922                         return -ENOSPC;
1923                 }
1924                 offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM;
1925                 *mbp_buf_size = rte_pktmbuf_data_room_size(mpl);
1926                 length = length != 0 ? length : *mbp_buf_size;
1927                 if (*mbp_buf_size < length + offset) {
1928                         RTE_ETHDEV_LOG(ERR,
1929                                        "%s mbuf_data_room_size %u < %u (segment length=%u + segment offset=%u)\n",
1930                                        mpl->name, *mbp_buf_size,
1931                                        length + offset, length, offset);
1932                         return -EINVAL;
1933                 }
1934         }
1935         return 0;
1936 }
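
/*
 * Illustrative sketch (editor's addition): a two-segment Rx buffer split
 * that passes the checks above, assuming the device reports multi_pools
 * support in rx_seg_capa; hdr_pool and pay_pool are hypothetical mempools
 * created by the application.
 *
 *	struct rte_eth_rxseg_split segs[2] = {
 *		{ .mp = hdr_pool, .length = 128, .offset = 0 },
 *		{ .mp = pay_pool, .length = 0, .offset = 0 },
 *	};
 *	struct rte_eth_rxconf rxconf = dev_info.default_rxconf;
 *
 *	rxconf.rx_seg = (union rte_eth_rxseg *)segs;
 *	rxconf.rx_nseg = 2;
 *	rxconf.offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
 *	// then: rte_eth_rx_queue_setup(port_id, 0, 0, socket_id, &rxconf, NULL);
 */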
1937
1938 int
1939 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1940                        uint16_t nb_rx_desc, unsigned int socket_id,
1941                        const struct rte_eth_rxconf *rx_conf,
1942                        struct rte_mempool *mp)
1943 {
1944         int ret;
1945         uint32_t mbp_buf_size;
1946         struct rte_eth_dev *dev;
1947         struct rte_eth_dev_info dev_info;
1948         struct rte_eth_rxconf local_conf;
1949         void **rxq;
1950
1951         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1952
1953         dev = &rte_eth_devices[port_id];
1954         if (rx_queue_id >= dev->data->nb_rx_queues) {
1955                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
1956                 return -EINVAL;
1957         }
1958
1959         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
1960
1961         ret = rte_eth_dev_info_get(port_id, &dev_info);
1962         if (ret != 0)
1963                 return ret;
1964
1965         if (mp != NULL) {
1966                 /* Single pool configuration check. */
1967                 if (rx_conf != NULL && rx_conf->rx_nseg != 0) {
1968                         RTE_ETHDEV_LOG(ERR,
1969                                        "Ambiguous segment configuration\n");
1970                         return -EINVAL;
1971                 }
                /*
                 * Check the size of the mbuf data buffer; this value
                 * must be provided in the private data of the memory
                 * pool. First check that the memory pool has valid
                 * private data.
                 */
1977                 if (mp->private_data_size <
1978                                 sizeof(struct rte_pktmbuf_pool_private)) {
1979                         RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n",
1980                                 mp->name, mp->private_data_size,
1981                                 (unsigned int)
1982                                 sizeof(struct rte_pktmbuf_pool_private));
1983                         return -ENOSPC;
1984                 }
1985                 mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1986                 if (mbp_buf_size < dev_info.min_rx_bufsize +
1987                                    RTE_PKTMBUF_HEADROOM) {
1988                         RTE_ETHDEV_LOG(ERR,
1989                                        "%s mbuf_data_room_size %u < %u (RTE_PKTMBUF_HEADROOM=%u + min_rx_bufsize(dev)=%u)\n",
1990                                        mp->name, mbp_buf_size,
1991                                        RTE_PKTMBUF_HEADROOM +
1992                                        dev_info.min_rx_bufsize,
1993                                        RTE_PKTMBUF_HEADROOM,
1994                                        dev_info.min_rx_bufsize);
1995                         return -EINVAL;
1996                 }
1997         } else {
1998                 const struct rte_eth_rxseg_split *rx_seg;
1999                 uint16_t n_seg;
2000
2001                 /* Extended multi-segment configuration check. */
2002                 if (rx_conf == NULL || rx_conf->rx_seg == NULL || rx_conf->rx_nseg == 0) {
2003                         RTE_ETHDEV_LOG(ERR,
2004                                        "Memory pool is null and no extended configuration provided\n");
2005                         return -EINVAL;
2006                 }
2007
2008                 rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg;
2009                 n_seg = rx_conf->rx_nseg;
2010
2011                 if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
2012                         ret = rte_eth_rx_queue_check_split(rx_seg, n_seg,
2013                                                            &mbp_buf_size,
2014                                                            &dev_info);
2015                         if (ret != 0)
2016                                 return ret;
2017                 } else {
2018                         RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n");
2019                         return -EINVAL;
2020                 }
2021         }
2022
2023         /* Use default specified by driver, if nb_rx_desc is zero */
2024         if (nb_rx_desc == 0) {
2025                 nb_rx_desc = dev_info.default_rxportconf.ring_size;
2026                 /* If driver default is also zero, fall back on EAL default */
2027                 if (nb_rx_desc == 0)
2028                         nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
2029         }
2030
2031         if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
2032                         nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
2033                         nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
2034
2035                 RTE_ETHDEV_LOG(ERR,
                        "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
2037                         nb_rx_desc, dev_info.rx_desc_lim.nb_max,
2038                         dev_info.rx_desc_lim.nb_min,
2039                         dev_info.rx_desc_lim.nb_align);
2040                 return -EINVAL;
2041         }
2042
2043         if (dev->data->dev_started &&
2044                 !(dev_info.dev_capa &
2045                         RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
2046                 return -EBUSY;
2047
2048         if (dev->data->dev_started &&
2049                 (dev->data->rx_queue_state[rx_queue_id] !=
2050                         RTE_ETH_QUEUE_STATE_STOPPED))
2051                 return -EBUSY;
2052
2053         rxq = dev->data->rx_queues;
2054         if (rxq[rx_queue_id]) {
2055                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
2056                                         -ENOTSUP);
2057                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
2058                 rxq[rx_queue_id] = NULL;
2059         }
2060
2061         if (rx_conf == NULL)
2062                 rx_conf = &dev_info.default_rxconf;
2063
2064         local_conf = *rx_conf;
2065
        /*
         * If an offload has already been enabled in
         * rte_eth_dev_configure(), it is enabled on all queues,
         * so there is no need to enable it on this queue again.
         * The local_conf.offloads input to the underlying PMD only
         * carries those offloads that are enabled on this queue
         * alone, not those enabled on all queues.
         */
2074         local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
2075
        /*
         * Newly added offloads for this queue are those not enabled in
         * rte_eth_dev_configure(), and they must be of the per-queue
         * type. A pure per-port offload can't be enabled on one queue
         * while disabled on another, so a pure per-port offload can't
         * be newly added on a single queue unless it was enabled in
         * rte_eth_dev_configure().
         */
2084         if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
2085              local_conf.offloads) {
2086                 RTE_ETHDEV_LOG(ERR,
                        "Ethdev port_id=%d rx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
2088                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2089                         port_id, rx_queue_id, local_conf.offloads,
2090                         dev_info.rx_queue_offload_capa,
2091                         __func__);
2092                 return -EINVAL;
2093         }
2094
2095         /*
2096          * If LRO is enabled, check that the maximum aggregated packet
2097          * size is supported by the configured device.
2098          */
2099         if (local_conf.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
2100                 if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0)
2101                         dev->data->dev_conf.rxmode.max_lro_pkt_size =
2102                                 dev->data->dev_conf.rxmode.max_rx_pkt_len;
2103                 int ret = eth_dev_check_lro_pkt_size(port_id,
2104                                 dev->data->dev_conf.rxmode.max_lro_pkt_size,
2105                                 dev->data->dev_conf.rxmode.max_rx_pkt_len,
2106                                 dev_info.max_lro_pkt_size);
2107                 if (ret != 0)
2108                         return ret;
2109         }
2110
2111         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
2112                                               socket_id, &local_conf, mp);
2113         if (!ret) {
2114                 if (!dev->data->min_rx_buf_size ||
2115                     dev->data->min_rx_buf_size > mbp_buf_size)
2116                         dev->data->min_rx_buf_size = mbp_buf_size;
2117         }
2118
2119         rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp,
2120                 rx_conf, ret);
2121         return eth_err(port_id, ret);
2122 }
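
/*
 * Illustrative sketch (editor's addition): minimal Rx queue setup. Zero
 * descriptors selects the driver (or EAL fallback) default ring size, and
 * a NULL rx_conf selects dev_info.default_rxconf; mbuf_pool is a
 * hypothetical mempool created with rte_pktmbuf_pool_create().
 *
 *	ret = rte_eth_rx_queue_setup(port_id, 0, 0,
 *			rte_eth_dev_socket_id(port_id), NULL, mbuf_pool);
 */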
2123
2124 int
2125 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2126                                uint16_t nb_rx_desc,
2127                                const struct rte_eth_hairpin_conf *conf)
2128 {
2129         int ret;
2130         struct rte_eth_dev *dev;
2131         struct rte_eth_hairpin_cap cap;
2132         void **rxq;
2133         int i;
2134         int count;
2135
2136         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2137
2138         dev = &rte_eth_devices[port_id];
2139         if (rx_queue_id >= dev->data->nb_rx_queues) {
2140                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
2141                 return -EINVAL;
2142         }
2143         ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2144         if (ret != 0)
2145                 return ret;
2146         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_hairpin_queue_setup,
2147                                 -ENOTSUP);
        /* If nb_rx_desc is zero, use the driver's maximum descriptor count. */
2149         if (nb_rx_desc == 0)
2150                 nb_rx_desc = cap.max_nb_desc;
2151         if (nb_rx_desc > cap.max_nb_desc) {
2152                 RTE_ETHDEV_LOG(ERR,
                        "Invalid value for nb_rx_desc(=%hu), should be: <= %hu\n",
2154                         nb_rx_desc, cap.max_nb_desc);
2155                 return -EINVAL;
2156         }
2157         if (conf->peer_count > cap.max_rx_2_tx) {
2158                 RTE_ETHDEV_LOG(ERR,
                        "Invalid value for number of peers for Rx queue(=%u), should be: <= %hu\n",
2160                         conf->peer_count, cap.max_rx_2_tx);
2161                 return -EINVAL;
2162         }
2163         if (conf->peer_count == 0) {
2164                 RTE_ETHDEV_LOG(ERR,
                        "Invalid value for number of peers for Rx queue(=%u), should be: > 0\n",
2166                         conf->peer_count);
2167                 return -EINVAL;
2168         }
2169         for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
2170              cap.max_nb_queues != UINT16_MAX; i++) {
2171                 if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
2172                         count++;
2173         }
2174         if (count > cap.max_nb_queues) {
                RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues, max is %d\n",
                               cap.max_nb_queues);
2177                 return -EINVAL;
2178         }
2179         if (dev->data->dev_started)
2180                 return -EBUSY;
2181         rxq = dev->data->rx_queues;
2182         if (rxq[rx_queue_id] != NULL) {
2183                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
2184                                         -ENOTSUP);
2185                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
2186                 rxq[rx_queue_id] = NULL;
2187         }
2188         ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
2189                                                       nb_rx_desc, conf);
2190         if (ret == 0)
2191                 dev->data->rx_queue_state[rx_queue_id] =
2192                         RTE_ETH_QUEUE_STATE_HAIRPIN;
2193         return eth_err(port_id, ret);
2194 }
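
/*
 * Illustrative sketch (editor's addition): a single-peer hairpin Rx queue
 * looped to Tx queue 1 of the same port, within the limits reported by
 * rte_eth_dev_hairpin_capability_get().
 *
 *	struct rte_eth_hairpin_conf hp_conf = { .peer_count = 1 };
 *
 *	hp_conf.peers[0].port = port_id;
 *	hp_conf.peers[0].queue = 1;
 *	ret = rte_eth_rx_hairpin_queue_setup(port_id, 1, 0, &hp_conf);
 */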
2195
2196 int
2197 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2198                        uint16_t nb_tx_desc, unsigned int socket_id,
2199                        const struct rte_eth_txconf *tx_conf)
2200 {
2201         struct rte_eth_dev *dev;
2202         struct rte_eth_dev_info dev_info;
2203         struct rte_eth_txconf local_conf;
2204         void **txq;
2205         int ret;
2206
2207         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2208
2209         dev = &rte_eth_devices[port_id];
2210         if (tx_queue_id >= dev->data->nb_tx_queues) {
2211                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
2212                 return -EINVAL;
2213         }
2214
2215         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
2216
2217         ret = rte_eth_dev_info_get(port_id, &dev_info);
2218         if (ret != 0)
2219                 return ret;
2220
2221         /* Use default specified by driver, if nb_tx_desc is zero */
2222         if (nb_tx_desc == 0) {
2223                 nb_tx_desc = dev_info.default_txportconf.ring_size;
2224                 /* If driver default is zero, fall back on EAL default */
2225                 if (nb_tx_desc == 0)
2226                         nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
2227         }
2228         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
2229             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
2230             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
2231                 RTE_ETHDEV_LOG(ERR,
                        "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
2233                         nb_tx_desc, dev_info.tx_desc_lim.nb_max,
2234                         dev_info.tx_desc_lim.nb_min,
2235                         dev_info.tx_desc_lim.nb_align);
2236                 return -EINVAL;
2237         }
2238
2239         if (dev->data->dev_started &&
2240                 !(dev_info.dev_capa &
2241                         RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
2242                 return -EBUSY;
2243
2244         if (dev->data->dev_started &&
2245                 (dev->data->tx_queue_state[tx_queue_id] !=
2246                         RTE_ETH_QUEUE_STATE_STOPPED))
2247                 return -EBUSY;
2248
2249         txq = dev->data->tx_queues;
2250         if (txq[tx_queue_id]) {
2251                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
2252                                         -ENOTSUP);
2253                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
2254                 txq[tx_queue_id] = NULL;
2255         }
2256
2257         if (tx_conf == NULL)
2258                 tx_conf = &dev_info.default_txconf;
2259
2260         local_conf = *tx_conf;
2261
        /*
         * If an offload has already been enabled in
         * rte_eth_dev_configure(), it is enabled on all queues,
         * so there is no need to enable it on this queue again.
         * The local_conf.offloads input to the underlying PMD only
         * carries those offloads that are enabled on this queue
         * alone, not those enabled on all queues.
         */
2270         local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
2271
        /*
         * Newly added offloads for this queue are those not enabled in
         * rte_eth_dev_configure(), and they must be of the per-queue
         * type. A pure per-port offload can't be enabled on one queue
         * while disabled on another, so a pure per-port offload can't
         * be newly added on a single queue unless it was enabled in
         * rte_eth_dev_configure().
         */
2280         if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
2281              local_conf.offloads) {
2282                 RTE_ETHDEV_LOG(ERR,
                        "Ethdev port_id=%d tx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
2284                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2285                         port_id, tx_queue_id, local_conf.offloads,
2286                         dev_info.tx_queue_offload_capa,
2287                         __func__);
2288                 return -EINVAL;
2289         }
2290
2291         rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf);
2292         return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
2293                        tx_queue_id, nb_tx_desc, socket_id, &local_conf));
2294 }
2295
2296 int
2297 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2298                                uint16_t nb_tx_desc,
2299                                const struct rte_eth_hairpin_conf *conf)
2300 {
2301         struct rte_eth_dev *dev;
2302         struct rte_eth_hairpin_cap cap;
2303         void **txq;
2304         int i;
2305         int count;
2306         int ret;
2307
2308         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2309         dev = &rte_eth_devices[port_id];
2310         if (tx_queue_id >= dev->data->nb_tx_queues) {
2311                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
2312                 return -EINVAL;
2313         }
2314         ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2315         if (ret != 0)
2316                 return ret;
2317         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup,
2318                                 -ENOTSUP);
        /* If nb_tx_desc is zero, use the driver's maximum descriptor count. */
2320         if (nb_tx_desc == 0)
2321                 nb_tx_desc = cap.max_nb_desc;
2322         if (nb_tx_desc > cap.max_nb_desc) {
2323                 RTE_ETHDEV_LOG(ERR,
                        "Invalid value for nb_tx_desc(=%hu), should be: <= %hu\n",
2325                         nb_tx_desc, cap.max_nb_desc);
2326                 return -EINVAL;
2327         }
2328         if (conf->peer_count > cap.max_tx_2_rx) {
2329                 RTE_ETHDEV_LOG(ERR,
                        "Invalid value for number of peers for Tx queue(=%u), should be: <= %hu\n",
2331                         conf->peer_count, cap.max_tx_2_rx);
2332                 return -EINVAL;
2333         }
2334         if (conf->peer_count == 0) {
2335                 RTE_ETHDEV_LOG(ERR,
                        "Invalid value for number of peers for Tx queue(=%u), should be: > 0\n",
2337                         conf->peer_count);
2338                 return -EINVAL;
2339         }
2340         for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
2341              cap.max_nb_queues != UINT16_MAX; i++) {
2342                 if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
2343                         count++;
2344         }
2345         if (count > cap.max_nb_queues) {
                RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues, max is %d\n",
                               cap.max_nb_queues);
2348                 return -EINVAL;
2349         }
2350         if (dev->data->dev_started)
2351                 return -EBUSY;
2352         txq = dev->data->tx_queues;
2353         if (txq[tx_queue_id] != NULL) {
2354                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
2355                                         -ENOTSUP);
2356                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
2357                 txq[tx_queue_id] = NULL;
2358         }
2359         ret = (*dev->dev_ops->tx_hairpin_queue_setup)
2360                 (dev, tx_queue_id, nb_tx_desc, conf);
2361         if (ret == 0)
2362                 dev->data->tx_queue_state[tx_queue_id] =
2363                         RTE_ETH_QUEUE_STATE_HAIRPIN;
2364         return eth_err(port_id, ret);
2365 }
2366
2367 int
2368 rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
2369 {
2370         struct rte_eth_dev *dev;
2371         int ret;
2372
2373         RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2374         dev = &rte_eth_devices[tx_port];
2375         if (dev->data->dev_started == 0) {
2376                 RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port);
2377                 return -EBUSY;
2378         }
2379
2380         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_bind, -ENOTSUP);
2381         ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port);
2382         if (ret != 0)
2383                 RTE_ETHDEV_LOG(ERR, "Failed to bind hairpin Tx %d"
2384                                " to Rx %d (%d - all ports)\n",
2385                                tx_port, rx_port, RTE_MAX_ETHPORTS);
2386
2387         return ret;
2388 }
2389
2390 int
2391 rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
2392 {
2393         struct rte_eth_dev *dev;
2394         int ret;
2395
2396         RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2397         dev = &rte_eth_devices[tx_port];
2398         if (dev->data->dev_started == 0) {
2399                 RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port);
2400                 return -EBUSY;
2401         }
2402
2403         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_unbind, -ENOTSUP);
2404         ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port);
2405         if (ret != 0)
2406                 RTE_ETHDEV_LOG(ERR, "Failed to unbind hairpin Tx %d"
2407                                " from Rx %d (%d - all ports)\n",
2408                                tx_port, rx_port, RTE_MAX_ETHPORTS);
2409
2410         return ret;
2411 }
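
/*
 * Illustrative sketch (editor's addition): manually binding and later
 * unbinding the hairpin queues of tx_port; as the logs above note, passing
 * RTE_MAX_ETHPORTS as the Rx port applies the operation to all peer ports.
 *
 *	ret = rte_eth_hairpin_bind(tx_port, RTE_MAX_ETHPORTS);
 *	...
 *	ret = rte_eth_hairpin_unbind(tx_port, RTE_MAX_ETHPORTS);
 */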
2412
2413 int
2414 rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
2415                                size_t len, uint32_t direction)
2416 {
2417         struct rte_eth_dev *dev;
2418         int ret;
2419
2420         if (peer_ports == NULL || len == 0)
2421                 return -EINVAL;
2422
2423         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2424         dev = &rte_eth_devices[port_id];
2425         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_get_peer_ports,
2426                                 -ENOTSUP);
2427
2428         ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports,
2429                                                       len, direction);
2430         if (ret < 0)
                RTE_ETHDEV_LOG(ERR, "Failed to get hairpin peer %s ports of port %d\n",
                               direction ? "Rx" : "Tx", port_id);
2433
2434         return ret;
2435 }
2436
2437 void
2438 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
2439                 void *userdata __rte_unused)
2440 {
2441         rte_pktmbuf_free_bulk(pkts, unsent);
2442 }
2443
2444 void
2445 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
2446                 void *userdata)
2447 {
2448         uint64_t *count = userdata;
2449
2450         rte_pktmbuf_free_bulk(pkts, unsent);
2451         *count += unsent;
2452 }
2453
2454 int
2455 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
2456                 buffer_tx_error_fn cbfn, void *userdata)
2457 {
2458         buffer->error_callback = cbfn;
2459         buffer->error_userdata = userdata;
2460         return 0;
2461 }
2462
2463 int
2464 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
2465 {
2466         int ret = 0;
2467
2468         if (buffer == NULL)
2469                 return -EINVAL;
2470
2471         buffer->size = size;
2472         if (buffer->error_callback == NULL) {
2473                 ret = rte_eth_tx_buffer_set_err_callback(
2474                         buffer, rte_eth_tx_buffer_drop_callback, NULL);
2475         }
2476
2477         return ret;
2478 }
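
/*
 * Illustrative sketch (editor's addition): allocating a Tx buffer and
 * installing the count callback above so unsent packets are freed and
 * tallied; BURST_SIZE is a hypothetical application constant.
 *
 *	static uint64_t drops;
 *	struct rte_eth_dev_tx_buffer *buf;
 *
 *	buf = rte_zmalloc_socket("tx_buffer",
 *			RTE_ETH_TX_BUFFER_SIZE(BURST_SIZE), 0,
 *			rte_eth_dev_socket_id(port_id));
 *	rte_eth_tx_buffer_init(buf, BURST_SIZE);
 *	rte_eth_tx_buffer_set_err_callback(buf,
 *			rte_eth_tx_buffer_count_callback, &drops);
 */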
2479
2480 int
2481 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
2482 {
2483         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2484         int ret;
2485
        /* Validate input data. Bail out if not valid or not supported. */
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
        if (queue_id >= dev->data->nb_tx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
                return -EINVAL;
        }
2489
2490         /* Call driver to free pending mbufs. */
2491         ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
2492                                                free_cnt);
2493         return eth_err(port_id, ret);
2494 }
2495
2496 int
2497 rte_eth_promiscuous_enable(uint16_t port_id)
2498 {
2499         struct rte_eth_dev *dev;
2500         int diag = 0;
2501
2502         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2503         dev = &rte_eth_devices[port_id];
2504
2505         if (dev->data->promiscuous == 1)
2506                 return 0;
2507
2508         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP);
2509
2510         diag = (*dev->dev_ops->promiscuous_enable)(dev);
2511         dev->data->promiscuous = (diag == 0) ? 1 : 0;
2512
2513         return eth_err(port_id, diag);
2514 }
2515
2516 int
2517 rte_eth_promiscuous_disable(uint16_t port_id)
2518 {
2519         struct rte_eth_dev *dev;
2520         int diag = 0;
2521
2522         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2523         dev = &rte_eth_devices[port_id];
2524
2525         if (dev->data->promiscuous == 0)
2526                 return 0;
2527
2528         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP);
2529
2530         dev->data->promiscuous = 0;
2531         diag = (*dev->dev_ops->promiscuous_disable)(dev);
2532         if (diag != 0)
2533                 dev->data->promiscuous = 1;
2534
2535         return eth_err(port_id, diag);
2536 }
2537
2538 int
2539 rte_eth_promiscuous_get(uint16_t port_id)
2540 {
2541         struct rte_eth_dev *dev;
2542
2543         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2544
2545         dev = &rte_eth_devices[port_id];
2546         return dev->data->promiscuous;
2547 }
2548
2549 int
2550 rte_eth_allmulticast_enable(uint16_t port_id)
2551 {
2552         struct rte_eth_dev *dev;
2553         int diag;
2554
2555         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2556         dev = &rte_eth_devices[port_id];
2557
2558         if (dev->data->all_multicast == 1)
2559                 return 0;
2560
2561         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP);
2562         diag = (*dev->dev_ops->allmulticast_enable)(dev);
2563         dev->data->all_multicast = (diag == 0) ? 1 : 0;
2564
2565         return eth_err(port_id, diag);
2566 }
2567
2568 int
2569 rte_eth_allmulticast_disable(uint16_t port_id)
2570 {
2571         struct rte_eth_dev *dev;
2572         int diag;
2573
2574         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2575         dev = &rte_eth_devices[port_id];
2576
2577         if (dev->data->all_multicast == 0)
2578                 return 0;
2579
2580         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP);
2581         dev->data->all_multicast = 0;
2582         diag = (*dev->dev_ops->allmulticast_disable)(dev);
2583         if (diag != 0)
2584                 dev->data->all_multicast = 1;
2585
2586         return eth_err(port_id, diag);
2587 }
2588
2589 int
2590 rte_eth_allmulticast_get(uint16_t port_id)
2591 {
2592         struct rte_eth_dev *dev;
2593
2594         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2595
2596         dev = &rte_eth_devices[port_id];
2597         return dev->data->all_multicast;
2598 }
2599
2600 int
2601 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
2602 {
2603         struct rte_eth_dev *dev;
2604
2605         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2606         dev = &rte_eth_devices[port_id];
2607
2608         if (dev->data->dev_conf.intr_conf.lsc &&
2609             dev->data->dev_started)
2610                 rte_eth_linkstatus_get(dev, eth_link);
2611         else {
2612                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2613                 (*dev->dev_ops->link_update)(dev, 1);
2614                 *eth_link = dev->data->dev_link;
2615         }
2616
2617         return 0;
2618 }
2619
2620 int
2621 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
2622 {
2623         struct rte_eth_dev *dev;
2624
2625         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2626         dev = &rte_eth_devices[port_id];
2627
2628         if (dev->data->dev_conf.intr_conf.lsc &&
2629             dev->data->dev_started)
2630                 rte_eth_linkstatus_get(dev, eth_link);
2631         else {
2632                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2633                 (*dev->dev_ops->link_update)(dev, 0);
2634                 *eth_link = dev->data->dev_link;
2635         }
2636
2637         return 0;
2638 }
2639
2640 const char *
2641 rte_eth_link_speed_to_str(uint32_t link_speed)
2642 {
2643         switch (link_speed) {
2644         case ETH_SPEED_NUM_NONE: return "None";
2645         case ETH_SPEED_NUM_10M:  return "10 Mbps";
2646         case ETH_SPEED_NUM_100M: return "100 Mbps";
2647         case ETH_SPEED_NUM_1G:   return "1 Gbps";
2648         case ETH_SPEED_NUM_2_5G: return "2.5 Gbps";
2649         case ETH_SPEED_NUM_5G:   return "5 Gbps";
2650         case ETH_SPEED_NUM_10G:  return "10 Gbps";
2651         case ETH_SPEED_NUM_20G:  return "20 Gbps";
2652         case ETH_SPEED_NUM_25G:  return "25 Gbps";
2653         case ETH_SPEED_NUM_40G:  return "40 Gbps";
2654         case ETH_SPEED_NUM_50G:  return "50 Gbps";
2655         case ETH_SPEED_NUM_56G:  return "56 Gbps";
2656         case ETH_SPEED_NUM_100G: return "100 Gbps";
2657         case ETH_SPEED_NUM_200G: return "200 Gbps";
2658         case ETH_SPEED_NUM_UNKNOWN: return "Unknown";
2659         default: return "Invalid";
2660         }
2661 }
2662
2663 int
2664 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
2665 {
2666         if (eth_link->link_status == ETH_LINK_DOWN)
2667                 return snprintf(str, len, "Link down");
2668         else
2669                 return snprintf(str, len, "Link up at %s %s %s",
2670                         rte_eth_link_speed_to_str(eth_link->link_speed),
2671                         (eth_link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
2672                         "FDX" : "HDX",
2673                         (eth_link->link_autoneg == ETH_LINK_AUTONEG) ?
2674                         "Autoneg" : "Fixed");
2675 }
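
/*
 * Illustrative sketch (editor's addition): formatting the current link
 * state with the two helpers above.
 *
 *	struct rte_eth_link link;
 *	char buf[RTE_ETH_LINK_MAX_STR_LEN];
 *
 *	if (rte_eth_link_get_nowait(port_id, &link) == 0) {
 *		rte_eth_link_to_str(buf, sizeof(buf), &link);
 *		printf("Port %u: %s\n", port_id, buf);
 *	}
 */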
2676
2677 int
2678 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
2679 {
2680         struct rte_eth_dev *dev;
2681
2682         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2683
2684         dev = &rte_eth_devices[port_id];
2685         memset(stats, 0, sizeof(*stats));
2686
2687         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
2688         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
2689         return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
2690 }
2691
2692 int
2693 rte_eth_stats_reset(uint16_t port_id)
2694 {
2695         struct rte_eth_dev *dev;
2696         int ret;
2697
2698         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2699         dev = &rte_eth_devices[port_id];
2700
2701         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
2702         ret = (*dev->dev_ops->stats_reset)(dev);
2703         if (ret != 0)
2704                 return eth_err(port_id, ret);
2705
2706         dev->data->rx_mbuf_alloc_failed = 0;
2707
2708         return 0;
2709 }
2710
2711 static inline int
2712 eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev)
2713 {
2714         uint16_t nb_rxqs, nb_txqs;
2715         int count;
2716
2717         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2718         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2719
2720         count = RTE_NB_STATS;
2721         if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) {
2722                 count += nb_rxqs * RTE_NB_RXQ_STATS;
2723                 count += nb_txqs * RTE_NB_TXQ_STATS;
2724         }
2725
2726         return count;
2727 }
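
/*
 * Editor's note (worked example): for a device with the
 * RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS flag, 4 Rx queues and 4 Tx queues
 * (both below RTE_ETHDEV_QUEUE_STAT_CNTRS), the function above returns
 * RTE_NB_STATS + 4 * RTE_NB_RXQ_STATS + 4 * RTE_NB_TXQ_STATS.
 */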
2728
2729 static int
2730 eth_dev_get_xstats_count(uint16_t port_id)
2731 {
2732         struct rte_eth_dev *dev;
2733         int count;
2734
2735         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2736         dev = &rte_eth_devices[port_id];
2737         if (dev->dev_ops->xstats_get_names_by_id != NULL) {
2738                 count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
2739                                 NULL, 0);
2740                 if (count < 0)
2741                         return eth_err(port_id, count);
2742         }
2743         if (dev->dev_ops->xstats_get_names != NULL) {
2744                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
2745                 if (count < 0)
2746                         return eth_err(port_id, count);
2747         } else
2748                 count = 0;
2749
2751         count += eth_dev_get_xstats_basic_count(dev);
2752
2753         return count;
2754 }
2755
2756 int
2757 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
2758                 uint64_t *id)
2759 {
2760         int cnt_xstats, idx_xstat;
2761
2762         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2763
2764         if (!id) {
2765                 RTE_ETHDEV_LOG(ERR, "Id pointer is NULL\n");
2766                 return -ENOMEM;
2767         }
2768
2769         if (!xstat_name) {
2770                 RTE_ETHDEV_LOG(ERR, "xstat_name pointer is NULL\n");
2771                 return -ENOMEM;
2772         }
2773
2774         /* Get count */
2775         cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
        if (cnt_xstats < 0) {
2777                 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n");
2778                 return -ENODEV;
2779         }
2780
2781         /* Get id-name lookup table */
2782         struct rte_eth_xstat_name xstats_names[cnt_xstats];
2783
2784         if (cnt_xstats != rte_eth_xstats_get_names_by_id(
2785                         port_id, xstats_names, cnt_xstats, NULL)) {
2786                 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n");
2787                 return -1;
2788         }
2789
2790         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
2791                 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
2792                         *id = idx_xstat;
2793                         return 0;
                }
2795         }
2796
2797         return -EINVAL;
2798 }
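
/*
 * Illustrative sketch (editor's addition): resolving an xstat id by name
 * and reading it with rte_eth_xstats_get_by_id().
 *
 *	uint64_t id, value;
 *
 *	if (rte_eth_xstats_get_id_by_name(port_id,
 *			"rx_good_packets", &id) == 0 &&
 *	    rte_eth_xstats_get_by_id(port_id, &id, &value, 1) == 1)
 *		printf("rx_good_packets: %" PRIu64 "\n", value);
 */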
2799
2800 /* retrieve basic stats names */
2801 static int
2802 eth_basic_stats_get_names(struct rte_eth_dev *dev,
2803         struct rte_eth_xstat_name *xstats_names)
2804 {
2805         int cnt_used_entries = 0;
2806         uint32_t idx, id_queue;
2807         uint16_t num_q;
2808
2809         for (idx = 0; idx < RTE_NB_STATS; idx++) {
2810                 strlcpy(xstats_names[cnt_used_entries].name,
2811                         eth_dev_stats_strings[idx].name,
2812                         sizeof(xstats_names[0].name));
2813                 cnt_used_entries++;
2814         }
2815
2816         if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
2817                 return cnt_used_entries;
2818
2819         num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2820         for (id_queue = 0; id_queue < num_q; id_queue++) {
2821                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
2822                         snprintf(xstats_names[cnt_used_entries].name,
2823                                 sizeof(xstats_names[0].name),
2824                                 "rx_q%u_%s",
2825                                 id_queue, eth_dev_rxq_stats_strings[idx].name);
2826                         cnt_used_entries++;
2827                 }
2828
2829         }
2830         num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2831         for (id_queue = 0; id_queue < num_q; id_queue++) {
2832                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
2833                         snprintf(xstats_names[cnt_used_entries].name,
2834                                 sizeof(xstats_names[0].name),
2835                                 "tx_q%u_%s",
2836                                 id_queue, eth_dev_txq_stats_strings[idx].name);
2837                         cnt_used_entries++;
2838                 }
2839         }
2840         return cnt_used_entries;
2841 }
2842
2843 /* retrieve ethdev extended statistics names */
2844 int
2845 rte_eth_xstats_get_names_by_id(uint16_t port_id,
2846         struct rte_eth_xstat_name *xstats_names, unsigned int size,
2847         uint64_t *ids)
2848 {
2849         struct rte_eth_xstat_name *xstats_names_copy;
2850         unsigned int no_basic_stat_requested = 1;
2851         unsigned int no_ext_stat_requested = 1;
2852         unsigned int expected_entries;
2853         unsigned int basic_count;
2854         struct rte_eth_dev *dev;
2855         unsigned int i;
2856         int ret;
2857
2858         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2859         dev = &rte_eth_devices[port_id];
2860
2861         basic_count = eth_dev_get_xstats_basic_count(dev);
2862         ret = eth_dev_get_xstats_count(port_id);
2863         if (ret < 0)
2864                 return ret;
2865         expected_entries = (unsigned int)ret;
2866
2867         /* Return max number of stats if no ids given */
2868         if (!ids) {
2869                 if (!xstats_names)
2870                         return expected_entries;
                else if (size < expected_entries)
2872                         return expected_entries;
2873         }
2874
2875         if (ids && !xstats_names)
2876                 return -EINVAL;
2877
2878         if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
2879                 uint64_t ids_copy[size];
2880
2881                 for (i = 0; i < size; i++) {
2882                         if (ids[i] < basic_count) {
2883                                 no_basic_stat_requested = 0;
2884                                 break;
2885                         }
2886
2887                         /*
2888                          * Convert ids to the xstats ids the PMD knows;
2889                          * user-visible ids span basic + extended stats.
2890                          */
2891                         ids_copy[i] = ids[i] - basic_count;
2892                 }
2893
2894                 if (no_basic_stat_requested)
2895                         return (*dev->dev_ops->xstats_get_names_by_id)(dev,
2896                                         xstats_names, ids_copy, size);
2897         }
2898
2899         /* Retrieve all stats */
2900         if (!ids) {
2901                 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
2902                                 expected_entries);
2903                 if (num_stats < 0 || num_stats > (int)expected_entries)
2904                         return num_stats;
2905                 else
2906                         return expected_entries;
2907         }
2908
2909         xstats_names_copy = calloc(expected_entries,
2910                 sizeof(struct rte_eth_xstat_name));
2911
2912         if (!xstats_names_copy) {
2913                 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n");
2914                 return -ENOMEM;
2915         }
2916
2917         if (ids) {
2918                 for (i = 0; i < size; i++) {
2919                         if (ids[i] >= basic_count) {
2920                                 no_ext_stat_requested = 0;
2921                                 break;
2922                         }
2923                 }
2924         }
2925
2926         /* Fill xstats_names_copy structure */
2927         if (ids && no_ext_stat_requested) {
2928                 eth_basic_stats_get_names(dev, xstats_names_copy);
2929         } else {
2930                 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
2931                         expected_entries);
2932                 if (ret < 0) {
2933                         free(xstats_names_copy);
2934                         return ret;
2935                 }
2936         }
2937
2938         /* Filter stats */
2939         for (i = 0; i < size; i++) {
2940                 if (ids[i] >= expected_entries) {
2941                         RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
2942                         free(xstats_names_copy);
2943                         return -1;
2944                 }
2945                 xstats_names[i] = xstats_names_copy[ids[i]];
2946         }
2947
2948         free(xstats_names_copy);
2949         return size;
2950 }
2951
2952 int
2953 rte_eth_xstats_get_names(uint16_t port_id,
2954         struct rte_eth_xstat_name *xstats_names,
2955         unsigned int size)
2956 {
2957         struct rte_eth_dev *dev;
2958         int cnt_used_entries;
2959         int cnt_expected_entries;
2960         int cnt_driver_entries;
2961
2962         cnt_expected_entries = eth_dev_get_xstats_count(port_id);
2963         if (xstats_names == NULL || cnt_expected_entries < 0 ||
2964                         (int)size < cnt_expected_entries)
2965                 return cnt_expected_entries;
2966
2967         /* port_id checked in eth_dev_get_xstats_count() */
2968         dev = &rte_eth_devices[port_id];
2969
2970         cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names);
2971
2972         if (dev->dev_ops->xstats_get_names != NULL) {
2973                 /* If there are any driver-specific xstats, append them
2974                  * to end of list.
2975                  */
2976                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
2977                         dev,
2978                         xstats_names + cnt_used_entries,
2979                         size - cnt_used_entries);
2980                 if (cnt_driver_entries < 0)
2981                         return eth_err(port_id, cnt_driver_entries);
2982                 cnt_used_entries += cnt_driver_entries;
2983         }
2984
2985         return cnt_used_entries;
2986 }
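
/*
 * A minimal sizing-then-fetch sketch for the call above (hypothetical
 * application code, not part of this file): passing a NULL array returns
 * the required count, so the caller can allocate before fetching.
 *
 *	int nb = rte_eth_xstats_get_names(port_id, NULL, 0);
 *	struct rte_eth_xstat_name *names;
 *
 *	if (nb < 0)
 *		return nb;
 *	names = malloc(sizeof(*names) * nb);
 *	if (names == NULL)
 *		return -ENOMEM;
 *	nb = rte_eth_xstats_get_names(port_id, names, nb);
 */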
2987
2988
2989 static int
2990 eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
2991 {
2992         struct rte_eth_dev *dev;
2993         struct rte_eth_stats eth_stats;
2994         unsigned int count = 0, i, q;
2995         uint64_t val, *stats_ptr;
2996         uint16_t nb_rxqs, nb_txqs;
2997         int ret;
2998
2999         ret = rte_eth_stats_get(port_id, &eth_stats);
3000         if (ret < 0)
3001                 return ret;
3002
3003         dev = &rte_eth_devices[port_id];
3004
3005         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3006         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3007
3008         /* global stats */
3009         for (i = 0; i < RTE_NB_STATS; i++) {
3010                 stats_ptr = RTE_PTR_ADD(&eth_stats,
3011                                         eth_dev_stats_strings[i].offset);
3012                 val = *stats_ptr;
3013                 xstats[count++].value = val;
3014         }
3015
3016         if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
3017                 return count;
3018
3019         /* per-rxq stats */
3020         for (q = 0; q < nb_rxqs; q++) {
3021                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
3022                         stats_ptr = RTE_PTR_ADD(&eth_stats,
3023                                         eth_dev_rxq_stats_strings[i].offset +
3024                                         q * sizeof(uint64_t));
3025                         val = *stats_ptr;
3026                         xstats[count++].value = val;
3027                 }
3028         }
3029
3030         /* per-txq stats */
3031         for (q = 0; q < nb_txqs; q++) {
3032                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
3033                         stats_ptr = RTE_PTR_ADD(&eth_stats,
3034                                         eth_dev_txq_stats_strings[i].offset +
3035                                         q * sizeof(uint64_t));
3036                         val = *stats_ptr;
3037                         xstats[count++].value = val;
3038                 }
3039         }
3040         return count;
3041 }
3042
3043 /* retrieve ethdev extended statistics */
3044 int
3045 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
3046                          uint64_t *values, unsigned int size)
3047 {
3048         unsigned int no_basic_stat_requested = 1;
3049         unsigned int no_ext_stat_requested = 1;
3050         unsigned int num_xstats_filled;
3051         unsigned int basic_count;
3052         uint16_t expected_entries;
3053         struct rte_eth_dev *dev;
3054         unsigned int i;
3055         int ret;
3056
3057         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3058         ret = eth_dev_get_xstats_count(port_id);
3059         if (ret < 0)
3060                 return ret;
3061         expected_entries = (uint16_t)ret;
3062         struct rte_eth_xstat xstats[expected_entries];
3063         dev = &rte_eth_devices[port_id];
3064         basic_count = eth_dev_get_xstats_basic_count(dev);
3065
3066         /* Return max number of stats if no ids given */
3067         if (!ids) {
3068                 if (!values)
3069                         return expected_entries;
3070                 else if (size < expected_entries)
3071                         return expected_entries;
3072         }
3073
3074         if (ids && !values)
3075                 return -EINVAL;
3076
3077         if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
3078
3079                 uint64_t ids_copy[size];
3080
3081                 for (i = 0; i < size; i++) {
3082                         if (ids[i] < basic_count) {
3083                                 no_basic_stat_requested = 0;
3084                                 break;
3085                         }
3086
3087                         /*
3088                          * Convert ids to the xstats ids the PMD knows;
3089                          * user-visible ids span basic + extended stats.
3090                          */
3091                         ids_copy[i] = ids[i] - basic_count;
3092                 }
3093
3094                 if (no_basic_stat_requested)
3095                         return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
3096                                         values, size);
3097         }
3098
3099         if (ids) {
3100                 for (i = 0; i < size; i++) {
3101                         if (ids[i] >= basic_count) {
3102                                 no_ext_stat_requested = 0;
3103                                 break;
3104                         }
3105                 }
3106         }
3107
3108         /* Fill the xstats structure */
3109         if (ids && no_ext_stat_requested)
3110                 ret = eth_basic_stats_get(port_id, xstats);
3111         else
3112                 ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
3113
3114         if (ret < 0)
3115                 return ret;
3116         num_xstats_filled = (unsigned int)ret;
3117
3118         /* Return all stats */
3119         if (!ids) {
3120                 for (i = 0; i < num_xstats_filled; i++)
3121                         values[i] = xstats[i].value;
3122                 return expected_entries;
3123         }
3124
3125         /* Filter stats */
3126         for (i = 0; i < size; i++) {
3127                 if (ids[i] >= expected_entries) {
3128                         RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
3129                         return -1;
3130                 }
3131                 values[i] = xstats[ids[i]].value;
3132         }
3133         return size;
3134 }
3135
3136 int
3137 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
3138         unsigned int n)
3139 {
3140         struct rte_eth_dev *dev;
3141         unsigned int count = 0, i;
3142         signed int xcount = 0;
3143         uint16_t nb_rxqs, nb_txqs;
3144         int ret;
3145
3146         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3147
3148         dev = &rte_eth_devices[port_id];
3149
3150         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3151         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3152
3153         /* Return generic statistics */
3154         count = RTE_NB_STATS;
3155         if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS)
3156                 count += (nb_rxqs * RTE_NB_RXQ_STATS) + (nb_txqs * RTE_NB_TXQ_STATS);
3157
3158         /* implemented by the driver */
3159         if (dev->dev_ops->xstats_get != NULL) {
3160                 /* Retrieve the driver's own xstats, appended at the
3161                  * end of the xstats array.
3162                  */
3163                 xcount = (*dev->dev_ops->xstats_get)(dev,
3164                                      xstats ? xstats + count : NULL,
3165                                      (n > count) ? n - count : 0);
3166
3167                 if (xcount < 0)
3168                         return eth_err(port_id, xcount);
3169         }
3170
3171         if (n < count + xcount || xstats == NULL)
3172                 return count + xcount;
3173
3174         /* now fill the xstats structure */
3175         ret = eth_basic_stats_get(port_id, xstats);
3176         if (ret < 0)
3177                 return ret;
3178         count = ret;
3179
3180         for (i = 0; i < count; i++)
3181                 xstats[i].id = i;
3182         /* add an offset to driver-specific stats */
3183         for ( ; i < count + xcount; i++)
3184                 xstats[i].id += count;
3185
3186         return count + xcount;
3187 }
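
/*
 * Sketch of pairing names with values (hypothetical caller; "names" and
 * "nb" as sized and fetched in the sketch above). Both arrays are indexed
 * by the same xstat id, which is what the id assignment above guarantees.
 *
 *	struct rte_eth_xstat *xstats = malloc(sizeof(*xstats) * nb);
 *
 *	if (xstats != NULL &&
 *	    rte_eth_xstats_get(port_id, xstats, nb) == nb) {
 *		int i;
 *
 *		for (i = 0; i < nb; i++)
 *			printf("%s: %" PRIu64 "\n",
 *			       names[xstats[i].id].name, xstats[i].value);
 *	}
 */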
3188
3189 /* reset ethdev extended statistics */
3190 int
3191 rte_eth_xstats_reset(uint16_t port_id)
3192 {
3193         struct rte_eth_dev *dev;
3194
3195         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3196         dev = &rte_eth_devices[port_id];
3197
3198         /* implemented by the driver */
3199         if (dev->dev_ops->xstats_reset != NULL)
3200                 return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev));
3201
3202         /* fallback to default */
3203         return rte_eth_stats_reset(port_id);
3204 }
3205
3206 static int
3207 eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id,
3208                 uint8_t stat_idx, uint8_t is_rx)
3209 {
3210         struct rte_eth_dev *dev;
3211
3212         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3213
3214         dev = &rte_eth_devices[port_id];
3215
3216         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
3217
3218         if (is_rx && (queue_id >= dev->data->nb_rx_queues))
3219                 return -EINVAL;
3220
3221         if (!is_rx && (queue_id >= dev->data->nb_tx_queues))
3222                 return -EINVAL;
3223
3224         if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
3225                 return -EINVAL;
3226
3227         return (*dev->dev_ops->queue_stats_mapping_set)
3228                         (dev, queue_id, stat_idx, is_rx);
3229 }
3230
3231
3232 int
3233 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
3234                 uint8_t stat_idx)
3235 {
3236         return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id,
3237                                                 tx_queue_id,
3238                                                 stat_idx, STAT_QMAP_TX));
3239 }
3240
3241
3242 int
3243 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
3244                 uint8_t stat_idx)
3245 {
3246         return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id,
3247                                                 rx_queue_id,
3248                                                 stat_idx, STAT_QMAP_RX));
3249 }
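
/*
 * Usage sketch (hypothetical): on NICs supporting queue-stats mapping,
 * a queue must be bound to one of the RTE_ETHDEV_QUEUE_STAT_CNTRS slots
 * before rte_eth_stats_get() reports it, e.g. Rx queue 5 onto slot 0:
 *
 *	struct rte_eth_stats stats;
 *
 *	if (rte_eth_dev_set_rx_queue_stats_mapping(port_id, 5, 0) == 0 &&
 *	    rte_eth_stats_get(port_id, &stats) == 0)
 *		printf("rxq5 packets: %" PRIu64 "\n", stats.q_ipackets[0]);
 */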
3250
3251 int
3252 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
3253 {
3254         struct rte_eth_dev *dev;
3255
3256         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3257         dev = &rte_eth_devices[port_id];
3258
3259         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
3260         return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
3261                                                         fw_version, fw_size));
3262 }
3263
3264 int
3265 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
3266 {
3267         struct rte_eth_dev *dev;
3268         const struct rte_eth_desc_lim lim = {
3269                 .nb_max = UINT16_MAX,
3270                 .nb_min = 0,
3271                 .nb_align = 1,
3272                 .nb_seg_max = UINT16_MAX,
3273                 .nb_mtu_seg_max = UINT16_MAX,
3274         };
3275         int diag;
3276
3277         /*
3278          * Init dev_info before port_id check since caller does not have
3279          * return status and does not know if get is successful or not.
3280          */
3281         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3282         dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
3283
3284         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3285         dev = &rte_eth_devices[port_id];
3286
3287         dev_info->rx_desc_lim = lim;
3288         dev_info->tx_desc_lim = lim;
3289         dev_info->device = dev->device;
3290         dev_info->min_mtu = RTE_ETHER_MIN_MTU;
3291         dev_info->max_mtu = UINT16_MAX;
3292
3293         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
3294         diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info);
3295         if (diag != 0) {
3296                 /* Cleanup already filled in device information */
3297                 memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3298                 return eth_err(port_id, diag);
3299         }
3300
3301         /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */
3302         dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues,
3303                         RTE_MAX_QUEUES_PER_PORT);
3304         dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues,
3305                         RTE_MAX_QUEUES_PER_PORT);
3306
3307         dev_info->driver_name = dev->device->driver->name;
3308         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
3309         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
3310
3311         dev_info->dev_flags = &dev->data->dev_flags;
3312
3313         return 0;
3314 }
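
/*
 * Typical caller pattern (hypothetical; nb_rxq/nb_txq are the caller's
 * requested queue counts): consult the reported limits before
 * configuring the port.
 *
 *	struct rte_eth_dev_info dev_info;
 *	int ret = rte_eth_dev_info_get(port_id, &dev_info);
 *
 *	if (ret != 0)
 *		return ret;
 *	nb_rxq = RTE_MIN(nb_rxq, dev_info.max_rx_queues);
 *	nb_txq = RTE_MIN(nb_txq, dev_info.max_tx_queues);
 */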
3315
3316 int
3317 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3318                                  uint32_t *ptypes, int num)
3319 {
3320         int i, j;
3321         struct rte_eth_dev *dev;
3322         const uint32_t *all_ptypes;
3323
3324         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3325         dev = &rte_eth_devices[port_id];
3326         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
3327         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3328
3329         if (!all_ptypes)
3330                 return 0;
3331
3332         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
3333                 if (all_ptypes[i] & ptype_mask) {
3334                         if (j < num)
3335                                 ptypes[j] = all_ptypes[i];
3336                         j++;
3337                 }
3338
3339         return j;
3340 }
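
/*
 * Sizing sketch (hypothetical): the call returns the total number of
 * matching ptypes, so a first call with num == 0 sizes the array.
 *
 *	int n = rte_eth_dev_get_supported_ptypes(port_id,
 *			RTE_PTYPE_L4_MASK, NULL, 0);
 *
 *	if (n > 0) {
 *		uint32_t *ptypes = malloc(sizeof(*ptypes) * n);
 *
 *		if (ptypes != NULL)
 *			n = rte_eth_dev_get_supported_ptypes(port_id,
 *					RTE_PTYPE_L4_MASK, ptypes, n);
 *	}
 */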
3341
3342 int
3343 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3344                                  uint32_t *set_ptypes, unsigned int num)
3345 {
3346         const uint32_t valid_ptype_masks[] = {
3347                 RTE_PTYPE_L2_MASK,
3348                 RTE_PTYPE_L3_MASK,
3349                 RTE_PTYPE_L4_MASK,
3350                 RTE_PTYPE_TUNNEL_MASK,
3351                 RTE_PTYPE_INNER_L2_MASK,
3352                 RTE_PTYPE_INNER_L3_MASK,
3353                 RTE_PTYPE_INNER_L4_MASK,
3354         };
3355         const uint32_t *all_ptypes;
3356         struct rte_eth_dev *dev;
3357         uint32_t unused_mask;
3358         unsigned int i, j;
3359         int ret;
3360
3361         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3362         dev = &rte_eth_devices[port_id];
3363
3364         if (num > 0 && set_ptypes == NULL)
3365                 return -EINVAL;
3366
3367         if (*dev->dev_ops->dev_supported_ptypes_get == NULL ||
3368                         *dev->dev_ops->dev_ptypes_set == NULL) {
3369                 ret = 0;
3370                 goto ptype_unknown;
3371         }
3372
3373         if (ptype_mask == 0) {
3374                 ret = (*dev->dev_ops->dev_ptypes_set)(dev,
3375                                 ptype_mask);
3376                 goto ptype_unknown;
3377         }
3378
3379         unused_mask = ptype_mask;
3380         for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) {
3381                 uint32_t mask = ptype_mask & valid_ptype_masks[i];
3382                 if (mask && mask != valid_ptype_masks[i]) {
3383                         ret = -EINVAL;
3384                         goto ptype_unknown;
3385                 }
3386                 unused_mask &= ~valid_ptype_masks[i];
3387         }
3388
3389         if (unused_mask) {
3390                 ret = -EINVAL;
3391                 goto ptype_unknown;
3392         }
3393
3394         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3395         if (all_ptypes == NULL) {
3396                 ret = 0;
3397                 goto ptype_unknown;
3398         }
3399
3400         /*
3401          * Accommodate as many set_ptypes as possible. If the supplied
3402          * set_ptypes array is too small, fill it partially.
3403          */
3404         for (i = 0, j = 0; set_ptypes != NULL &&
3405                                 (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) {
3406                 if (ptype_mask & all_ptypes[i]) {
3407                         if (j + 1 < num) {
3408                                 set_ptypes[j] = all_ptypes[i];
3409                                 j++;
3410                                 continue;
3411                         }
3412                         break;
3413                 }
3414         }
3415
3416         if (set_ptypes != NULL && j < num)
3417                 set_ptypes[j] = RTE_PTYPE_UNKNOWN;
3418
3419         return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask);
3420
3421 ptype_unknown:
3422         if (num > 0)
3423                 set_ptypes[0] = RTE_PTYPE_UNKNOWN;
3424
3425         return ret;
3426 }
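
/*
 * Example (hypothetical): restrict packet-type parsing to the outer L3/L4
 * layers so the PMD can skip deeper classification; "kept" reports which
 * ptypes remain enabled, terminated by RTE_PTYPE_UNKNOWN when it fits.
 *
 *	uint32_t kept[8];
 *	int ret = rte_eth_dev_set_ptypes(port_id,
 *			RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK,
 *			kept, RTE_DIM(kept));
 */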
3427
3428 int
3429 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
3430 {
3431         struct rte_eth_dev *dev;
3432
3433         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3434         dev = &rte_eth_devices[port_id];
3435         rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
3436
3437         return 0;
3438 }
3439
3440 int
3441 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
3442 {
3443         struct rte_eth_dev *dev;
3444
3445         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3446
3447         dev = &rte_eth_devices[port_id];
3448         *mtu = dev->data->mtu;
3449         return 0;
3450 }
3451
3452 int
3453 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
3454 {
3455         int ret;
3456         struct rte_eth_dev_info dev_info;
3457         struct rte_eth_dev *dev;
3458
3459         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3460         dev = &rte_eth_devices[port_id];
3461         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
3462
3463         /*
3464          * If the device does not support dev_infos_get, skip the
3465          * min_mtu/max_mtu validation here: it needs values populated
3466          * by rte_eth_dev_info_get(), which in turn relies on
3467          * dev->dev_ops->dev_infos_get.
3468          */
3469         if (*dev->dev_ops->dev_infos_get != NULL) {
3470                 ret = rte_eth_dev_info_get(port_id, &dev_info);
3471                 if (ret != 0)
3472                         return ret;
3473
3474                 if (mtu < dev_info.min_mtu || mtu > dev_info.max_mtu)
3475                         return -EINVAL;
3476         }
3477
3478         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
3479         if (!ret)
3480                 dev->data->mtu = mtu;
3481
3482         return eth_err(port_id, ret);
3483 }
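
/*
 * Sketch (hypothetical): a portable caller checks the reported MTU range
 * itself, since the validation above is skipped when the driver lacks
 * dev_infos_get.
 *
 *	struct rte_eth_dev_info dev_info;
 *	int ret = -EINVAL;
 *
 *	if (rte_eth_dev_info_get(port_id, &dev_info) == 0 &&
 *	    mtu >= dev_info.min_mtu && mtu <= dev_info.max_mtu)
 *		ret = rte_eth_dev_set_mtu(port_id, mtu);
 */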
3484
3485 int
3486 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
3487 {
3488         struct rte_eth_dev *dev;
3489         int ret;
3490
3491         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3492         dev = &rte_eth_devices[port_id];
3493         if (!(dev->data->dev_conf.rxmode.offloads &
3494               DEV_RX_OFFLOAD_VLAN_FILTER)) {
3495                 RTE_ETHDEV_LOG(ERR, "Port %u: vlan-filtering disabled\n",
3496                         port_id);
3497                 return -ENOSYS;
3498         }
3499
3500         if (vlan_id > 4095) {
3501                 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n",
3502                         port_id, vlan_id);
3503                 return -EINVAL;
3504         }
3505         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
3506
3507         ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
3508         if (ret == 0) {
3509                 struct rte_vlan_filter_conf *vfc;
3510                 int vidx;
3511                 int vbit;
3512
3513                 vfc = &dev->data->vlan_filter_conf;
3514                 vidx = vlan_id / 64;
3515                 vbit = vlan_id % 64;
3516
3517                 if (on)
3518                         vfc->ids[vidx] |= UINT64_C(1) << vbit;
3519                 else
3520                         vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
3521         }
3522
3523         return eth_err(port_id, ret);
3524 }
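
/*
 * Worked example of the shadow bitmap above: enabling VLAN id 100 sets
 * bit 100 % 64 == 36 in vlan_filter_conf.ids[100 / 64 == 1], i.e.
 * ids[1] |= UINT64_C(1) << 36. The caller only sees the return code:
 *
 *	int ret = rte_eth_dev_vlan_filter(port_id, 100, 1);
 */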
3525
3526 int
3527 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3528                                     int on)
3529 {
3530         struct rte_eth_dev *dev;
3531
3532         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3533         dev = &rte_eth_devices[port_id];
3534         if (rx_queue_id >= dev->data->nb_rx_queues) {
3535                 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id);
3536                 return -EINVAL;
3537         }
3538
3539         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
3540         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
3541
3542         return 0;
3543 }
3544
3545 int
3546 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
3547                                 enum rte_vlan_type vlan_type,
3548                                 uint16_t tpid)
3549 {
3550         struct rte_eth_dev *dev;
3551
3552         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3553         dev = &rte_eth_devices[port_id];
3554         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
3555
3556         return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
3557                                                                tpid));
3558 }
3559
3560 int
3561 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
3562 {
3563         struct rte_eth_dev_info dev_info;
3564         struct rte_eth_dev *dev;
3565         int ret = 0;
3566         int mask = 0;
3567         int cur, org = 0;
3568         uint64_t orig_offloads;
3569         uint64_t dev_offloads;
3570         uint64_t new_offloads;
3571
3572         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3573         dev = &rte_eth_devices[port_id];
3574
3575         /* save original values in case of failure */
3576         orig_offloads = dev->data->dev_conf.rxmode.offloads;
3577         dev_offloads = orig_offloads;
3578
3579         /* check which option changed by application */
3580         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
3581         org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
3582         if (cur != org) {
3583                 if (cur)
3584                         dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
3585                 else
3586                         dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
3587                 mask |= ETH_VLAN_STRIP_MASK;
3588         }
3589
3590         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
3591         org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
3592         if (cur != org) {
3593                 if (cur)
3594                         dev_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3595                 else
3596                         dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
3597                 mask |= ETH_VLAN_FILTER_MASK;
3598         }
3599
3600         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
3601         org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND);
3602         if (cur != org) {
3603                 if (cur)
3604                         dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
3605                 else
3606                         dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
3607                 mask |= ETH_VLAN_EXTEND_MASK;
3608         }
3609
3610         cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD);
3611         org = !!(dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP);
3612         if (cur != org) {
3613                 if (cur)
3614                         dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
3615                 else
3616                         dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
3617                 mask |= ETH_QINQ_STRIP_MASK;
3618         }
3619
3620         /* no change */
3621         if (mask == 0)
3622                 return ret;
3623
3624         ret = rte_eth_dev_info_get(port_id, &dev_info);
3625         if (ret != 0)
3626                 return ret;
3627
3628         /* Rx VLAN offloading must be within its device capabilities */
3629         if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) {
3630                 new_offloads = dev_offloads & ~orig_offloads;
3631                 RTE_ETHDEV_LOG(ERR,
3632                         "Ethdev port_id=%u newly requested VLAN offloads "
3633                         "0x%" PRIx64 " must be within Rx offload capabilities "
3634                         "0x%" PRIx64 " in %s()\n",
3635                         port_id, new_offloads, dev_info.rx_offload_capa,
3636                         __func__);
3637                 return -EINVAL;
3638         }
3639
3640         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
3641         dev->data->dev_conf.rxmode.offloads = dev_offloads;
3642         ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
3643         if (ret) {
3644                 /* hit an error, restore the original values */
3645                 dev->data->dev_conf.rxmode.offloads = orig_offloads;
3646         }
3647
3648         return eth_err(port_id, ret);
3649 }
3650
3651 int
3652 rte_eth_dev_get_vlan_offload(uint16_t port_id)
3653 {
3654         struct rte_eth_dev *dev;
3655         uint64_t *dev_offloads;
3656         int ret = 0;
3657
3658         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3659         dev = &rte_eth_devices[port_id];
3660         dev_offloads = &dev->data->dev_conf.rxmode.offloads;
3661
3662         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
3663                 ret |= ETH_VLAN_STRIP_OFFLOAD;
3664
3665         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
3666                 ret |= ETH_VLAN_FILTER_OFFLOAD;
3667
3668         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
3669                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
3670
3671         if (*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
3672                 ret |= ETH_QINQ_STRIP_OFFLOAD;
3673
3674         return ret;
3675 }
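
/*
 * Read-modify-write sketch (hypothetical): fetch the current flags, set
 * only VLAN stripping, and write the whole mask back; the setter above
 * compares every flag against the current device configuration.
 *
 *	int flags = rte_eth_dev_get_vlan_offload(port_id);
 *	int ret = flags;
 *
 *	if (flags >= 0)
 *		ret = rte_eth_dev_set_vlan_offload(port_id,
 *				flags | ETH_VLAN_STRIP_OFFLOAD);
 */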
3676
3677 int
3678 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
3679 {
3680         struct rte_eth_dev *dev;
3681
3682         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3683         dev = &rte_eth_devices[port_id];
3684         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
3685
3686         return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
3687 }
3688
3689 int
3690 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3691 {
3692         struct rte_eth_dev *dev;
3693
3694         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3695         dev = &rte_eth_devices[port_id];
3696         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
3697         memset(fc_conf, 0, sizeof(*fc_conf));
3698         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
3699 }
3700
3701 int
3702 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3703 {
3704         struct rte_eth_dev *dev;
3705
3706         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3707         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
3708                 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n");
3709                 return -EINVAL;
3710         }
3711
3712         dev = &rte_eth_devices[port_id];
3713         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
3714         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
3715 }
3716
3717 int
3718 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
3719                                    struct rte_eth_pfc_conf *pfc_conf)
3720 {
3721         struct rte_eth_dev *dev;
3722
3723         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3724         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
3725                 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
3726                 return -EINVAL;
3727         }
3728
3729         dev = &rte_eth_devices[port_id];
3730         /* High/low watermark validation is device-specific */
3731         if (*dev->dev_ops->priority_flow_ctrl_set)
3732                 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
3733                                         (dev, pfc_conf));
3734         return -ENOTSUP;
3735 }
3736
3737 static int
3738 eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
3739                         uint16_t reta_size)
3740 {
3741         uint16_t i, num;
3742
3743         if (!reta_conf)
3744                 return -EINVAL;
3745
3746         num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
3747         for (i = 0; i < num; i++) {
3748                 if (reta_conf[i].mask)
3749                         return 0;
3750         }
3751
3752         return -EINVAL;
3753 }
3754
3755 static int
3756 eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
3757                          uint16_t reta_size,
3758                          uint16_t max_rxq)
3759 {
3760         uint16_t i, idx, shift;
3761
3762         if (!reta_conf)
3763                 return -EINVAL;
3764
3765         if (max_rxq == 0) {
3766                 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n");
3767                 return -EINVAL;
3768         }
3769
3770         for (i = 0; i < reta_size; i++) {
3771                 idx = i / RTE_RETA_GROUP_SIZE;
3772                 shift = i % RTE_RETA_GROUP_SIZE;
3773                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
3774                         (reta_conf[idx].reta[shift] >= max_rxq)) {
3775                         RTE_ETHDEV_LOG(ERR,
3776                                 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n",
3777                                 idx, shift,
3778                                 reta_conf[idx].reta[shift], max_rxq);
3779                         return -EINVAL;
3780                 }
3781         }
3782
3783         return 0;
3784 }
3785
3786 int
3787 rte_eth_dev_rss_reta_update(uint16_t port_id,
3788                             struct rte_eth_rss_reta_entry64 *reta_conf,
3789                             uint16_t reta_size)
3790 {
3791         struct rte_eth_dev *dev;
3792         int ret;
3793
3794         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3795         /* Check mask bits */
3796         ret = eth_check_reta_mask(reta_conf, reta_size);
3797         if (ret < 0)
3798                 return ret;
3799
3800         dev = &rte_eth_devices[port_id];
3801
3802         /* Check entry value */
3803         ret = eth_check_reta_entry(reta_conf, reta_size,
3804                                 dev->data->nb_rx_queues);
3805         if (ret < 0)
3806                 return ret;
3807
3808         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
3809         return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
3810                                                              reta_size));
3811 }
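
/*
 * Sketch of building a redirection table (hypothetical; assumes reta_size,
 * as reported in dev_info.reta_size, is a multiple of RTE_RETA_GROUP_SIZE
 * and nb_queues > 0). Each 64-entry group carries its own valid-bit mask,
 * mirroring the idx/shift math validated above.
 *
 *	struct rte_eth_rss_reta_entry64 conf[reta_size / RTE_RETA_GROUP_SIZE];
 *	uint16_t i;
 *
 *	memset(conf, 0, sizeof(conf));
 *	for (i = 0; i < reta_size; i++) {
 *		conf[i / RTE_RETA_GROUP_SIZE].mask |=
 *			UINT64_C(1) << (i % RTE_RETA_GROUP_SIZE);
 *		conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *			i % nb_queues;
 *	}
 *	ret = rte_eth_dev_rss_reta_update(port_id, conf, reta_size);
 */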
3812
3813 int
3814 rte_eth_dev_rss_reta_query(uint16_t port_id,
3815                            struct rte_eth_rss_reta_entry64 *reta_conf,
3816                            uint16_t reta_size)
3817 {
3818         struct rte_eth_dev *dev;
3819         int ret;
3820
3821         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3822
3823         /* Check mask bits */
3824         ret = eth_check_reta_mask(reta_conf, reta_size);
3825         if (ret < 0)
3826                 return ret;
3827
3828         dev = &rte_eth_devices[port_id];
3829         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
3830         return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
3831                                                             reta_size));
3832 }
3833
3834 int
3835 rte_eth_dev_rss_hash_update(uint16_t port_id,
3836                             struct rte_eth_rss_conf *rss_conf)
3837 {
3838         struct rte_eth_dev *dev;
3839         struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
3840         int ret;
3841
3842         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3843
3844         ret = rte_eth_dev_info_get(port_id, &dev_info);
3845         if (ret != 0)
3846                 return ret;
3847
3848         rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf);
3849
3850         dev = &rte_eth_devices[port_id];
3851         if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
3852             dev_info.flow_type_rss_offloads) {
3853                 RTE_ETHDEV_LOG(ERR,
3854                         "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
3855                         port_id, rss_conf->rss_hf,
3856                         dev_info.flow_type_rss_offloads);
3857                 return -EINVAL;
3858         }
3859         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
3860         return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
3861                                                                  rss_conf));
3862 }
3863
3864 int
3865 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
3866                               struct rte_eth_rss_conf *rss_conf)
3867 {
3868         struct rte_eth_dev *dev;
3869
3870         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3871         dev = &rte_eth_devices[port_id];
3872         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
3873         return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
3874                                                                    rss_conf));
3875 }
3876
3877 int
3878 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
3879                                 struct rte_eth_udp_tunnel *udp_tunnel)
3880 {
3881         struct rte_eth_dev *dev;
3882
3883         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3884         if (udp_tunnel == NULL) {
3885                 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
3886                 return -EINVAL;
3887         }
3888
3889         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
3890                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
3891                 return -EINVAL;
3892         }
3893
3894         dev = &rte_eth_devices[port_id];
3895         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
3896         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
3897                                                                 udp_tunnel));
3898 }
3899
3900 int
3901 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
3902                                    struct rte_eth_udp_tunnel *udp_tunnel)
3903 {
3904         struct rte_eth_dev *dev;
3905
3906         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3907         dev = &rte_eth_devices[port_id];
3908
3909         if (udp_tunnel == NULL) {
3910                 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
3911                 return -EINVAL;
3912         }
3913
3914         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
3915                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
3916                 return -EINVAL;
3917         }
3918
3919         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
3920         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
3921                                                                 udp_tunnel));
3922 }
3923
3924 int
3925 rte_eth_led_on(uint16_t port_id)
3926 {
3927         struct rte_eth_dev *dev;
3928
3929         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3930         dev = &rte_eth_devices[port_id];
3931         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
3932         return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
3933 }
3934
3935 int
3936 rte_eth_led_off(uint16_t port_id)
3937 {
3938         struct rte_eth_dev *dev;
3939
3940         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3941         dev = &rte_eth_devices[port_id];
3942         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
3943         return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
3944 }
3945
3946 int
3947 rte_eth_fec_get_capability(uint16_t port_id,
3948                            struct rte_eth_fec_capa *speed_fec_capa,
3949                            unsigned int num)
3950 {
3951         struct rte_eth_dev *dev;
3952         int ret;
3953
3954         if (speed_fec_capa == NULL && num > 0)
3955                 return -EINVAL;
3956
3957         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3958         dev = &rte_eth_devices[port_id];
3959         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get_capability, -ENOTSUP);
3960         ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num);
3961
3962         return ret;
3963 }
3964
3965 int
3966 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa)
3967 {
3968         struct rte_eth_dev *dev;
3969
3970         if (fec_capa == NULL)
3971                 return -EINVAL;
3972
3973         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3974         dev = &rte_eth_devices[port_id];
3975         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get, -ENOTSUP);
3976         return eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa));
3977 }
3978
3979 int
3980 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa)
3981 {
3982         struct rte_eth_dev *dev;
3983
3984         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3985         dev = &rte_eth_devices[port_id];
3986         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_set, -ENOTSUP);
3987         return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa));
3988 }
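
/*
 * Sketch (hypothetical): query the per-speed FEC capabilities, then
 * request automatic FEC selection; RTE_ETH_FEC_MODE_CAPA_MASK() turns an
 * rte_eth_fec_mode name into the capability bit expected by fec_set.
 *
 *	struct rte_eth_fec_capa capa[8];
 *	int n = rte_eth_fec_get_capability(port_id, capa, RTE_DIM(capa));
 *
 *	if (n > 0)
 *		ret = rte_eth_fec_set(port_id,
 *				RTE_ETH_FEC_MODE_CAPA_MASK(AUTO));
 */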
3989
3990 /*
3991  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
3992  * an empty spot.
3993  */
3994 static int
3995 eth_dev_get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
3996 {
3997         struct rte_eth_dev_info dev_info;
3998         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3999         unsigned i;
4000         int ret;
4001
4002         ret = rte_eth_dev_info_get(port_id, &dev_info);
4003         if (ret != 0)
4004                 return -1;
4005
4006         for (i = 0; i < dev_info.max_mac_addrs; i++)
4007                 if (memcmp(addr, &dev->data->mac_addrs[i],
4008                                 RTE_ETHER_ADDR_LEN) == 0)
4009                         return i;
4010
4011         return -1;
4012 }
4013
4014 static const struct rte_ether_addr null_mac_addr;
4015
4016 int
4017 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
4018                         uint32_t pool)
4019 {
4020         struct rte_eth_dev *dev;
4021         int index;
4022         uint64_t pool_mask;
4023         int ret;
4024
4025         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4026         dev = &rte_eth_devices[port_id];
4027         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
4028
4029         if (rte_is_zero_ether_addr(addr)) {
4030                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
4031                         port_id);
4032                 return -EINVAL;
4033         }
4034         if (pool >= ETH_64_POOLS) {
4035                 RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", ETH_64_POOLS - 1);
4036                 return -EINVAL;
4037         }
4038
4039         index = eth_dev_get_mac_addr_index(port_id, addr);
4040         if (index < 0) {
4041                 index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr);
4042                 if (index < 0) {
4043                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
4044                                 port_id);
4045                         return -ENOSPC;
4046                 }
4047         } else {
4048                 pool_mask = dev->data->mac_pool_sel[index];
4049
4050                 /* If both the MAC address and pool are already there, do nothing */
4051                 if (pool_mask & (1ULL << pool))
4052                         return 0;
4053         }
4054
4055         /* Update NIC */
4056         ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
4057
4058         if (ret == 0) {
4059                 /* Update address in NIC data structure */
4060                 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]);
4061
4062                 /* Update pool bitmap in NIC data structure */
4063                 dev->data->mac_pool_sel[index] |= (1ULL << pool);
4064         }
4065
4066         return eth_err(port_id, ret);
4067 }
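
/*
 * Usage sketch (hypothetical): add a locally administered secondary MAC
 * address to pool 0; re-adding the same address/pool pair is a no-op,
 * as the pool-mask check above shows.
 *
 *	struct rte_ether_addr addr = {
 *		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 *	};
 *	int ret = rte_eth_dev_mac_addr_add(port_id, &addr, 0);
 */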
4068
4069 int
4070 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr)
4071 {
4072         struct rte_eth_dev *dev;
4073         int index;
4074
4075         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4076         dev = &rte_eth_devices[port_id];
4077         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
4078
4079         index = eth_dev_get_mac_addr_index(port_id, addr);
4080         if (index == 0) {
4081                 RTE_ETHDEV_LOG(ERR,
4082                         "Port %u: Cannot remove default MAC address\n",
4083                         port_id);
4084                 return -EADDRINUSE;
4085         } else if (index < 0)
4086                 return 0;  /* Do nothing if address wasn't found */
4087
4088         /* Update NIC */
4089         (*dev->dev_ops->mac_addr_remove)(dev, index);
4090
4091         /* Update address in NIC data structure */
4092         rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
4093
4094         /* reset pool bitmap */
4095         dev->data->mac_pool_sel[index] = 0;
4096
4097         return 0;
4098 }
4099
4100 int
4101 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr)
4102 {
4103         struct rte_eth_dev *dev;
4104         int ret;
4105
4106         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4107
4108         if (!rte_is_valid_assigned_ether_addr(addr))
4109                 return -EINVAL;
4110
4111         dev = &rte_eth_devices[port_id];
4112         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
4113
4114         ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
4115         if (ret < 0)
4116                 return ret;
4117
4118         /* Update default address in NIC data structure */
4119         rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
4120
4121         return 0;
4122 }
4123
4124
4125 /*
4126  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
4127  * an empty spot.
4128  */
4129 static int
4130 eth_dev_get_hash_mac_addr_index(uint16_t port_id,
4131                 const struct rte_ether_addr *addr)
4132 {
4133         struct rte_eth_dev_info dev_info;
4134         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4135         unsigned i;
4136         int ret;
4137
4138         ret = rte_eth_dev_info_get(port_id, &dev_info);
4139         if (ret != 0)
4140                 return -1;
4141
4142         if (!dev->data->hash_mac_addrs)
4143                 return -1;
4144
4145         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
4146                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
4147                         RTE_ETHER_ADDR_LEN) == 0)
4148                         return i;
4149
4150         return -1;
4151 }
4152
4153 int
4154 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
4155                                 uint8_t on)
4156 {
4157         int index;
4158         int ret;
4159         struct rte_eth_dev *dev;
4160
4161         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4162
4163         dev = &rte_eth_devices[port_id];
4164         if (rte_is_zero_ether_addr(addr)) {
4165                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
4166                         port_id);
4167                 return -EINVAL;
4168         }
4169
4170         index = eth_dev_get_hash_mac_addr_index(port_id, addr);
4171         /* Check if it's already there, and do nothing */
4172         if ((index >= 0) && on)
4173                 return 0;
4174
4175         if (index < 0) {
4176                 if (!on) {
4177                         RTE_ETHDEV_LOG(ERR,
4178                                 "Port %u: the MAC address was not set in UTA\n",
4179                                 port_id);
4180                         return -EINVAL;
4181                 }
4182
4183                 index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr);
4184                 if (index < 0) {
4185                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
4186                                 port_id);
4187                         return -ENOSPC;
4188                 }
4189         }
4190
4191         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
4192         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
4193         if (ret == 0) {
4194                 /* Update address in NIC data structure */
4195                 if (on)
4196                         rte_ether_addr_copy(addr,
4197                                         &dev->data->hash_mac_addrs[index]);
4198                 else
4199                         rte_ether_addr_copy(&null_mac_addr,
4200                                         &dev->data->hash_mac_addrs[index]);
4201         }
4202
4203         return eth_err(port_id, ret);
4204 }
4205
4206 int
4207 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
4208 {
4209         struct rte_eth_dev *dev;
4210
4211         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4212
4213         dev = &rte_eth_devices[port_id];
4214
4215         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
4216         return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
4217                                                                        on));
4218 }
4219
4220 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
4221                                         uint16_t tx_rate)
4222 {
4223         struct rte_eth_dev *dev;
4224         struct rte_eth_dev_info dev_info;
4225         struct rte_eth_link link;
4226         int ret;
4227
4228         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4229
4230         ret = rte_eth_dev_info_get(port_id, &dev_info);
4231         if (ret != 0)
4232                 return ret;
4233
4234         dev = &rte_eth_devices[port_id];
4235         link = dev->data->dev_link;
4236
4237         if (queue_idx >= dev_info.max_tx_queues) {
4238                 RTE_ETHDEV_LOG(ERR,
4239                         "Set queue rate limit: port %u: invalid queue id=%u\n",
4240                         port_id, queue_idx);
4241                 return -EINVAL;
4242         }
4243
4244         if (tx_rate > link.link_speed) {
4245                 RTE_ETHDEV_LOG(ERR,
4246                         "Set queue rate limit: invalid tx_rate=%u, bigger than link speed %u\n",
4247                         tx_rate, link.link_speed);
4248                 return -EINVAL;
4249         }
4250
4251         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
4252         return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
4253                                                         queue_idx, tx_rate));
4254 }
4255
4256 int
4257 rte_eth_mirror_rule_set(uint16_t port_id,
4258                         struct rte_eth_mirror_conf *mirror_conf,
4259                         uint8_t rule_id, uint8_t on)
4260 {
4261         struct rte_eth_dev *dev;
4262
4263         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4264         if (mirror_conf->rule_type == 0) {
4265                 RTE_ETHDEV_LOG(ERR, "Mirror rule type cannot be 0\n");
4266                 return -EINVAL;
4267         }
4268
4269         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
4270                 RTE_ETHDEV_LOG(ERR, "Invalid dst pool, pool id must be 0-%d\n",
4271                         ETH_64_POOLS - 1);
4272                 return -EINVAL;
4273         }
4274
4275         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
4276              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
4277             (mirror_conf->pool_mask == 0)) {
4278                 RTE_ETHDEV_LOG(ERR,
4279                         "Invalid mirror pool, pool mask cannot be 0\n");
4280                 return -EINVAL;
4281         }
4282
4283         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
4284             mirror_conf->vlan.vlan_mask == 0) {
4285                 RTE_ETHDEV_LOG(ERR,
4286                         "Invalid vlan mask, vlan mask cannot be 0\n");
4287                 return -EINVAL;
4288         }
4289
4290         dev = &rte_eth_devices[port_id];
4291         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
4292
4293         return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
4294                                                 mirror_conf, rule_id, on));
4295 }
4296
4297 int
4298 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
4299 {
4300         struct rte_eth_dev *dev;
4301
4302         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4303
4304         dev = &rte_eth_devices[port_id];
4305         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
4306
4307         return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev,
4308                                                                    rule_id));
4309 }
4310
4311 RTE_INIT(eth_dev_init_cb_lists)
4312 {
4313         uint16_t i;
4314
4315         for (i = 0; i < RTE_MAX_ETHPORTS; i++)
4316                 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
4317 }
4318
4319 int
4320 rte_eth_dev_callback_register(uint16_t port_id,
4321                         enum rte_eth_event_type event,
4322                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4323 {
4324         struct rte_eth_dev *dev;
4325         struct rte_eth_dev_callback *user_cb;
4326         uint16_t next_port;
4327         uint16_t last_port;
4328
4329         if (!cb_fn)
4330                 return -EINVAL;
4331
4332         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4333                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
4334                 return -EINVAL;
4335         }
4336
4337         if (port_id == RTE_ETH_ALL) {
4338                 next_port = 0;
4339                 last_port = RTE_MAX_ETHPORTS - 1;
4340         } else {
4341                 next_port = last_port = port_id;
4342         }
4343
4344         rte_spinlock_lock(&eth_dev_cb_lock);
4345
4346         do {
4347                 dev = &rte_eth_devices[next_port];
4348
4349                 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
4350                         if (user_cb->cb_fn == cb_fn &&
4351                                 user_cb->cb_arg == cb_arg &&
4352                                 user_cb->event == event) {
4353                                 break;
4354                         }
4355                 }
4356
4357                 /* create a new callback. */
4358                 if (user_cb == NULL) {
4359                         user_cb = rte_zmalloc("INTR_USER_CALLBACK",
4360                                 sizeof(struct rte_eth_dev_callback), 0);
4361                         if (user_cb != NULL) {
4362                                 user_cb->cb_fn = cb_fn;
4363                                 user_cb->cb_arg = cb_arg;
4364                                 user_cb->event = event;
4365                                 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
4366                                                   user_cb, next);
4367                         } else {
4368                                 rte_spinlock_unlock(&eth_dev_cb_lock);
4369                                 rte_eth_dev_callback_unregister(port_id, event,
4370                                                                 cb_fn, cb_arg);
4371                                 return -ENOMEM;
4372                         }
4373
4374                 }
4375         } while (++next_port <= last_port);
4376
4377         rte_spinlock_unlock(&eth_dev_cb_lock);
4378         return 0;
4379 }
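
/*
 * Callback sketch (hypothetical): register a link-status handler on all
 * ports; the same function/argument/event triple is registered only once,
 * as the de-duplication loop above ensures.
 *
 *	static int
 *	lsc_cb(uint16_t port_id, enum rte_eth_event_type event,
 *	       void *cb_arg, void *ret_param)
 *	{
 *		RTE_SET_USED(cb_arg);
 *		RTE_SET_USED(ret_param);
 *		printf("port %u: event %d\n", port_id, (int)event);
 *		return 0;
 *	}
 *
 *	rte_eth_dev_callback_register(RTE_ETH_ALL,
 *			RTE_ETH_EVENT_INTR_LSC, lsc_cb, NULL);
 */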
4380
int
rte_eth_dev_callback_unregister(uint16_t port_id,
                        enum rte_eth_event_type event,
                        rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
        int ret;
        struct rte_eth_dev *dev;
        struct rte_eth_dev_callback *cb, *next;
        uint16_t next_port;
        uint16_t last_port;

        if (!cb_fn)
                return -EINVAL;

        if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
                RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
                return -EINVAL;
        }

        if (port_id == RTE_ETH_ALL) {
                next_port = 0;
                last_port = RTE_MAX_ETHPORTS - 1;
        } else {
                next_port = last_port = port_id;
        }

        rte_spinlock_lock(&eth_dev_cb_lock);

        do {
                dev = &rte_eth_devices[next_port];
                ret = 0;
                for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
                     cb = next) {

                        next = TAILQ_NEXT(cb, next);

                        if (cb->cb_fn != cb_fn || cb->event != event ||
                            (cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
                                continue;

                        /*
                         * if this callback is not executing right now,
                         * then remove it.
                         */
                        if (cb->active == 0) {
                                TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
                                rte_free(cb);
                        } else {
                                ret = -EAGAIN;
                        }
                }
        } while (++next_port <= last_port);

        rte_spinlock_unlock(&eth_dev_cb_lock);
        return ret;
}

int
rte_eth_dev_callback_process(struct rte_eth_dev *dev,
        enum rte_eth_event_type event, void *ret_param)
{
        struct rte_eth_dev_callback *cb_lst;
        struct rte_eth_dev_callback dev_cb;
        int rc = 0;

        rte_spinlock_lock(&eth_dev_cb_lock);
        TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
                if (cb_lst->cb_fn == NULL || cb_lst->event != event)
                        continue;
                dev_cb = *cb_lst;
                cb_lst->active = 1;
                if (ret_param != NULL)
                        dev_cb.ret_param = ret_param;

                rte_spinlock_unlock(&eth_dev_cb_lock);
                rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
                                dev_cb.cb_arg, dev_cb.ret_param);
                rte_spinlock_lock(&eth_dev_cb_lock);
                cb_lst->active = 0;
        }
        rte_spinlock_unlock(&eth_dev_cb_lock);
        return rc;
}

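/*
 * Driver-side usage sketch (illustrative): a PMD's link interrupt
 * handler notifying applications of a link-state change. "my_pmd_dev"
 * is a hypothetical device pointer owned by the driver.
 *
 *	rte_eth_dev_callback_process(my_pmd_dev, RTE_ETH_EVENT_INTR_LSC,
 *				     NULL);
 */
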
void
rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
{
        if (dev == NULL)
                return;

        rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);

        dev->state = RTE_ETH_DEV_ATTACHED;
}

int
rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
{
        uint32_t vec;
        struct rte_eth_dev *dev;
        struct rte_intr_handle *intr_handle;
        uint16_t qid;
        int rc;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];

        if (!dev->intr_handle) {
                RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
                return -ENOTSUP;
        }

        intr_handle = dev->intr_handle;
        if (!intr_handle->intr_vec) {
                RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
                return -EPERM;
        }

        for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
                vec = intr_handle->intr_vec[qid];
                rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
                if (rc && rc != -EEXIST) {
                        RTE_ETHDEV_LOG(ERR,
                                "p %u q %u rx ctl error op %d epfd %d vec %u\n",
                                port_id, qid, op, epfd, vec);
                }
        }

        return 0;
}

int
rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
{
        struct rte_intr_handle *intr_handle;
        struct rte_eth_dev *dev;
        unsigned int efd_idx;
        uint32_t vec;
        int fd;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);

        dev = &rte_eth_devices[port_id];

        if (queue_id >= dev->data->nb_rx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
                return -1;
        }

        if (!dev->intr_handle) {
                RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
                return -1;
        }

        intr_handle = dev->intr_handle;
        if (!intr_handle->intr_vec) {
                RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
                return -1;
        }

        vec = intr_handle->intr_vec[queue_id];
        efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
                (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
        fd = intr_handle->efds[efd_idx];

        return fd;
}

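/*
 * Usage sketch (illustrative): feeding the per-queue event fd into an
 * application-owned epoll set. "app_epfd" is a hypothetical epoll
 * instance created with epoll_create1().
 *
 *	int fd = rte_eth_dev_rx_intr_ctl_q_get_fd(port_id, queue_id);
 *	if (fd >= 0) {
 *		struct epoll_event ev = {
 *			.events = EPOLLIN,
 *			.data.u32 = queue_id,
 *		};
 *		epoll_ctl(app_epfd, EPOLL_CTL_ADD, fd, &ev);
 *	}
 */
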
static inline int
eth_dev_dma_mzone_name(char *name, size_t len, uint16_t port_id, uint16_t queue_id,
                const char *ring_name)
{
        return snprintf(name, len, "eth_p%d_q%d_%s",
                        port_id, queue_id, ring_name);
}

const struct rte_memzone *
rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
                         uint16_t queue_id, size_t size, unsigned align,
                         int socket_id)
{
        char z_name[RTE_MEMZONE_NAMESIZE];
        const struct rte_memzone *mz;
        int rc;

        rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
                        queue_id, ring_name);
        if (rc >= RTE_MEMZONE_NAMESIZE) {
                RTE_ETHDEV_LOG(ERR, "ring name too long\n");
                rte_errno = ENAMETOOLONG;
                return NULL;
        }

        mz = rte_memzone_lookup(z_name);
        if (mz) {
                if ((socket_id != SOCKET_ID_ANY && socket_id != mz->socket_id) ||
                                size > mz->len ||
                                ((uintptr_t)mz->addr & (align - 1)) != 0) {
                        RTE_ETHDEV_LOG(ERR,
                                "memzone %s does not satisfy the requested attributes\n",
                                mz->name);
                        return NULL;
                }

                return mz;
        }

        return rte_memzone_reserve_aligned(z_name, size, socket_id,
                        RTE_MEMZONE_IOVA_CONTIG, align);
}

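/*
 * Driver-side usage sketch (illustrative): reserving IOVA-contiguous
 * memory for a descriptor ring during queue setup. The "rx_ring" name
 * and the ring_size variable are hypothetical.
 *
 *	const struct rte_memzone *mz;
 *
 *	mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
 *				      ring_size, RTE_CACHE_LINE_SIZE,
 *				      socket_id);
 *	if (mz == NULL)
 *		return -rte_errno;
 */
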
int
rte_eth_dma_zone_free(const struct rte_eth_dev *dev, const char *ring_name,
                uint16_t queue_id)
{
        char z_name[RTE_MEMZONE_NAMESIZE];
        const struct rte_memzone *mz;
        int rc = 0;

        rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
                        queue_id, ring_name);
        if (rc >= RTE_MEMZONE_NAMESIZE) {
                RTE_ETHDEV_LOG(ERR, "ring name too long\n");
                return -ENAMETOOLONG;
        }

        mz = rte_memzone_lookup(z_name);
        if (mz)
                rc = rte_memzone_free(mz);
        else
                rc = -ENOENT;

        return rc;
}

int
rte_eth_dev_create(struct rte_device *device, const char *name,
        size_t priv_data_size,
        ethdev_bus_specific_init ethdev_bus_specific_init,
        void *bus_init_params,
        ethdev_init_t ethdev_init, void *init_params)
{
        struct rte_eth_dev *ethdev;
        int retval;

        RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL);

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                ethdev = rte_eth_dev_allocate(name);
                if (!ethdev)
                        return -ENODEV;

                if (priv_data_size) {
                        ethdev->data->dev_private = rte_zmalloc_socket(
                                name, priv_data_size, RTE_CACHE_LINE_SIZE,
                                device->numa_node);

                        if (!ethdev->data->dev_private) {
                                RTE_ETHDEV_LOG(ERR,
                                        "failed to allocate private data\n");
                                retval = -ENOMEM;
                                goto probe_failed;
                        }
                }
        } else {
                ethdev = rte_eth_dev_attach_secondary(name);
                if (!ethdev) {
                        RTE_ETHDEV_LOG(ERR,
                                "secondary process attach failed, ethdev doesn't exist\n");
                        return -ENODEV;
                }
        }

        ethdev->device = device;

        if (ethdev_bus_specific_init) {
                retval = ethdev_bus_specific_init(ethdev, bus_init_params);
                if (retval) {
                        RTE_ETHDEV_LOG(ERR,
                                "ethdev bus specific initialisation failed\n");
                        goto probe_failed;
                }
        }

        retval = ethdev_init(ethdev, init_params);
        if (retval) {
                RTE_ETHDEV_LOG(ERR, "ethdev initialisation failed\n");
                goto probe_failed;
        }

        rte_eth_dev_probing_finish(ethdev);

        return retval;

probe_failed:
        rte_eth_dev_release_port(ethdev);
        return retval;
}

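/*
 * Driver-side usage sketch (illustrative): a bus probe hook creating
 * an ethdev. "struct my_priv", "my_bus_init" and "my_ethdev_init" are
 * hypothetical driver symbols.
 *
 *	static int
 *	my_probe(struct rte_device *device)
 *	{
 *		return rte_eth_dev_create(device, device->name,
 *					  sizeof(struct my_priv),
 *					  my_bus_init, NULL,
 *					  my_ethdev_init, NULL);
 *	}
 */
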
int
rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
        ethdev_uninit_t ethdev_uninit)
{
        int ret;

        ethdev = rte_eth_dev_allocated(ethdev->data->name);
        if (!ethdev)
                return -ENODEV;

        RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL);

        ret = ethdev_uninit(ethdev);
        if (ret)
                return ret;

        return rte_eth_dev_release_port(ethdev);
}

int
rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
                          int epfd, int op, void *data)
{
        uint32_t vec;
        struct rte_eth_dev *dev;
        struct rte_intr_handle *intr_handle;
        int rc;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        if (queue_id >= dev->data->nb_rx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
                return -EINVAL;
        }

        if (!dev->intr_handle) {
                RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
                return -ENOTSUP;
        }

        intr_handle = dev->intr_handle;
        if (!intr_handle->intr_vec) {
                RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
                return -EPERM;
        }

        vec = intr_handle->intr_vec[queue_id];
        rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
        if (rc && rc != -EEXIST) {
                RTE_ETHDEV_LOG(ERR,
                        "p %u q %u rx ctl error op %d epfd %d vec %u\n",
                        port_id, queue_id, op, epfd, vec);
                return rc;
        }

        return 0;
}

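/*
 * Usage sketch (illustrative): interrupt-driven Rx on one queue, in
 * the style of the l3fwd-power example. Error handling is omitted.
 *
 *	struct rte_mbuf *pkts[32];
 *	struct rte_epoll_event ev;
 *
 *	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
 *				  RTE_INTR_EVENT_ADD, NULL);
 *	for (;;) {
 *		rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *		rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
 *		rte_eth_dev_rx_intr_disable(port_id, queue_id);
 *		(void)rte_eth_rx_burst(port_id, queue_id, pkts,
 *				       RTE_DIM(pkts));
 *	}
 */
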
int
rte_eth_dev_rx_intr_enable(uint16_t port_id,
                           uint16_t queue_id)
{
        struct rte_eth_dev *dev;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];

        ret = eth_dev_validate_rx_queue(dev, queue_id);
        if (ret != 0)
                return ret;

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
        return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev,
                                                                queue_id));
}

int
rte_eth_dev_rx_intr_disable(uint16_t port_id,
                            uint16_t queue_id)
{
        struct rte_eth_dev *dev;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];

        ret = eth_dev_validate_rx_queue(dev, queue_id);
        if (ret != 0)
                return ret;

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
        return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev,
                                                                queue_id));
}

const struct rte_eth_rxtx_callback *
rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
                rte_rx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
        rte_errno = ENOTSUP;
        return NULL;
#endif
        struct rte_eth_dev *dev;

        /* check input parameters */
        if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
                    queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
                rte_errno = EINVAL;
                return NULL;
        }
        dev = &rte_eth_devices[port_id];
        if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
                rte_errno = EINVAL;
                return NULL;
        }
        struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

        if (cb == NULL) {
                rte_errno = ENOMEM;
                return NULL;
        }

        cb->fn.rx = fn;
        cb->param = user_param;

        rte_spinlock_lock(&eth_dev_rx_cb_lock);
        /* Add the callbacks in fifo order. */
        struct rte_eth_rxtx_callback *tail =
                rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];

        if (!tail) {
                /* Stores to cb->fn and cb->param should complete before
                 * cb is visible to data plane.
                 */
                __atomic_store_n(
                        &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
                        cb, __ATOMIC_RELEASE);

        } else {
                while (tail->next)
                        tail = tail->next;
                /* Stores to cb->fn and cb->param should complete before
                 * cb is visible to data plane.
                 */
                __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
        }
        rte_spinlock_unlock(&eth_dev_rx_cb_lock);

        return cb;
}

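/*
 * Usage sketch (illustrative): counting received packets with a
 * post-Rx callback. "app_count_cb" and the counter are hypothetical.
 *
 *	static uint16_t
 *	app_count_cb(uint16_t port_id, uint16_t queue_id,
 *			struct rte_mbuf *pkts[], uint16_t nb_pkts,
 *			uint16_t max_pkts, void *user_param)
 *	{
 *		RTE_SET_USED(port_id);
 *		RTE_SET_USED(queue_id);
 *		RTE_SET_USED(pkts);
 *		RTE_SET_USED(max_pkts);
 *		*(uint64_t *)user_param += nb_pkts;
 *		return nb_pkts;
 *	}
 *
 *	static uint64_t rx_count;
 *	const struct rte_eth_rxtx_callback *cb =
 *		rte_eth_add_rx_callback(port_id, queue_id,
 *					app_count_cb, &rx_count);
 */
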
const struct rte_eth_rxtx_callback *
rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
                rte_rx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
        rte_errno = ENOTSUP;
        return NULL;
#endif
        /* check input parameters */
        if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
                queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
                rte_errno = EINVAL;
                return NULL;
        }

        struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

        if (cb == NULL) {
                rte_errno = ENOMEM;
                return NULL;
        }

        cb->fn.rx = fn;
        cb->param = user_param;

        rte_spinlock_lock(&eth_dev_rx_cb_lock);
        /* Add the callbacks at first position */
        cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
        /* Stores to cb->fn, cb->param and cb->next should complete before
         * cb is visible to data plane threads.
         */
        __atomic_store_n(
                &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
                cb, __ATOMIC_RELEASE);
        rte_spinlock_unlock(&eth_dev_rx_cb_lock);

        return cb;
}

const struct rte_eth_rxtx_callback *
rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
                rte_tx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
        rte_errno = ENOTSUP;
        return NULL;
#endif
        struct rte_eth_dev *dev;

        /* check input parameters */
        if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
                    queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
                rte_errno = EINVAL;
                return NULL;
        }

        dev = &rte_eth_devices[port_id];
        if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
                rte_errno = EINVAL;
                return NULL;
        }

        struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

        if (cb == NULL) {
                rte_errno = ENOMEM;
                return NULL;
        }

        cb->fn.tx = fn;
        cb->param = user_param;

        rte_spinlock_lock(&eth_dev_tx_cb_lock);
        /* Add the callbacks in fifo order. */
        struct rte_eth_rxtx_callback *tail =
                rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];

        if (!tail) {
                /* Stores to cb->fn and cb->param should complete before
                 * cb is visible to data plane.
                 */
                __atomic_store_n(
                        &rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id],
                        cb, __ATOMIC_RELEASE);

        } else {
                while (tail->next)
                        tail = tail->next;
                /* Stores to cb->fn and cb->param should complete before
                 * cb is visible to data plane.
                 */
                __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
        }
        rte_spinlock_unlock(&eth_dev_tx_cb_lock);

        return cb;
}

int
rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
                const struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
        return -ENOTSUP;
#endif
        /* Check input parameters. */
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        if (user_cb == NULL ||
                        queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
                return -EINVAL;

        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        struct rte_eth_rxtx_callback *cb;
        struct rte_eth_rxtx_callback **prev_cb;
        int ret = -EINVAL;

        rte_spinlock_lock(&eth_dev_rx_cb_lock);
        prev_cb = &dev->post_rx_burst_cbs[queue_id];
        for (; *prev_cb != NULL; prev_cb = &cb->next) {
                cb = *prev_cb;
                if (cb == user_cb) {
                        /* Remove the user cb from the callback list. */
                        __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
                        ret = 0;
                        break;
                }
        }
        rte_spinlock_unlock(&eth_dev_rx_cb_lock);

        return ret;
}

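/*
 * Note for users: removal only unlinks the callback; the memory is not
 * freed here because a data plane thread may still be executing it.
 * A sketch of the documented teardown sequence, where the quiesce step
 * is application-specific and hypothetical:
 *
 *	if (rte_eth_remove_rx_callback(port_id, queue_id, cb) == 0) {
 *		wait_until_datapath_quiesced();
 *		rte_free((void *)(uintptr_t)cb);
 *	}
 */
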
int
rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
                const struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
        return -ENOTSUP;
#endif
        /* Check input parameters. */
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        if (user_cb == NULL ||
                        queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
                return -EINVAL;

        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        int ret = -EINVAL;
        struct rte_eth_rxtx_callback *cb;
        struct rte_eth_rxtx_callback **prev_cb;

        rte_spinlock_lock(&eth_dev_tx_cb_lock);
        prev_cb = &dev->pre_tx_burst_cbs[queue_id];
        for (; *prev_cb != NULL; prev_cb = &cb->next) {
                cb = *prev_cb;
                if (cb == user_cb) {
                        /* Remove the user cb from the callback list. */
                        __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
                        ret = 0;
                        break;
                }
        }
        rte_spinlock_unlock(&eth_dev_tx_cb_lock);

        return ret;
}

int
rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
        struct rte_eth_rxq_info *qinfo)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        if (qinfo == NULL)
                return -EINVAL;

        dev = &rte_eth_devices[port_id];
        if (queue_id >= dev->data->nb_rx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
                return -EINVAL;
        }

        if (dev->data->rx_queues == NULL ||
                        dev->data->rx_queues[queue_id] == NULL) {
                RTE_ETHDEV_LOG(ERR,
                               "Rx queue %"PRIu16" of device with port_id=%"
                               PRIu16" has not been setup\n",
                               queue_id, port_id);
                return -EINVAL;
        }

        if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
                        queue_id, port_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);

        memset(qinfo, 0, sizeof(*qinfo));
        dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
        qinfo->queue_state = dev->data->rx_queue_state[queue_id];

        return 0;
}

int
rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
        struct rte_eth_txq_info *qinfo)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        if (qinfo == NULL)
                return -EINVAL;

        dev = &rte_eth_devices[port_id];
        if (queue_id >= dev->data->nb_tx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
                return -EINVAL;
        }

        if (dev->data->tx_queues == NULL ||
                        dev->data->tx_queues[queue_id] == NULL) {
                RTE_ETHDEV_LOG(ERR,
                               "Tx queue %"PRIu16" of device with port_id=%"
                               PRIu16" has not been setup\n",
                               queue_id, port_id);
                return -EINVAL;
        }

        if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
                        queue_id, port_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);

        memset(qinfo, 0, sizeof(*qinfo));
        dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
        qinfo->queue_state = dev->data->tx_queue_state[queue_id];

        return 0;
}

int
rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
                          struct rte_eth_burst_mode *mode)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        if (mode == NULL)
                return -EINVAL;

        dev = &rte_eth_devices[port_id];

        if (queue_id >= dev->data->nb_rx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP);
        memset(mode, 0, sizeof(*mode));
        return eth_err(port_id,
                       dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode));
}

int
rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
                          struct rte_eth_burst_mode *mode)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        if (mode == NULL)
                return -EINVAL;

        dev = &rte_eth_devices[port_id];

        if (queue_id >= dev->data->nb_tx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP);
        memset(mode, 0, sizeof(*mode));
        return eth_err(port_id,
                       dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode));
}

int
rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id,
                struct rte_power_monitor_cond *pmc)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_monitor_addr, -ENOTSUP);

        if (queue_id >= dev->data->nb_rx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
                return -EINVAL;
        }

        if (pmc == NULL) {
                RTE_ETHDEV_LOG(ERR, "Invalid power monitor condition=%p\n",
                                pmc);
                return -EINVAL;
        }

        return eth_err(port_id,
                dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id],
                        pmc));
}

int
rte_eth_dev_set_mc_addr_list(uint16_t port_id,
                             struct rte_ether_addr *mc_addr_set,
                             uint32_t nb_mc_addr)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
        return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
                                                mc_addr_set, nb_mc_addr));
}

int
rte_eth_timesync_enable(uint16_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
        return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
}

int
rte_eth_timesync_disable(uint16_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
        return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
}

int
rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
                                   uint32_t flags)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
        return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
                                (dev, timestamp, flags));
}

int
rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
                                   struct timespec *timestamp)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
        return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
                                (dev, timestamp));
}

int
rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
        return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev,
                                                                      delta));
}

int
rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
        return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
                                                                timestamp));
}

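/*
 * Usage sketch (illustrative): enabling IEEE 1588 timestamping and
 * reading the device clock. Error handling is omitted.
 *
 *	struct timespec ts;
 *
 *	rte_eth_timesync_enable(port_id);
 *	rte_eth_timesync_read_time(port_id, &ts);
 *	// slew the device clock by +1 us, e.g. from a PTP servo
 *	rte_eth_timesync_adjust_time(port_id, 1000);
 */
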
int
rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
        return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
                                                                timestamp));
}

int
rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP);
        return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock));
}

int
rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        if (info == NULL)
                return -EINVAL;

        dev = &rte_eth_devices[port_id];
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
        return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
}

int
rte_eth_dev_get_eeprom_length(uint16_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
        return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
}

int
rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        if (info == NULL)
                return -EINVAL;

        dev = &rte_eth_devices[port_id];
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
        return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
}

int
rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        if (info == NULL)
                return -EINVAL;

        dev = &rte_eth_devices[port_id];
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
        return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
}

int
rte_eth_dev_get_module_info(uint16_t port_id,
                            struct rte_eth_dev_module_info *modinfo)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        if (modinfo == NULL)
                return -EINVAL;

        dev = &rte_eth_devices[port_id];
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP);
        return (*dev->dev_ops->get_module_info)(dev, modinfo);
}

int
rte_eth_dev_get_module_eeprom(uint16_t port_id,
                              struct rte_dev_eeprom_info *info)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        if (info == NULL || info->data == NULL || info->length == 0)
                return -EINVAL;

        dev = &rte_eth_devices[port_id];
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP);
        return (*dev->dev_ops->get_module_eeprom)(dev, info);
}

int
rte_eth_dev_get_dcb_info(uint16_t port_id,
                         struct rte_eth_dcb_info *dcb_info)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
        return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
}

static void
eth_dev_adjust_nb_desc(uint16_t *nb_desc,
                const struct rte_eth_desc_lim *desc_lim)
{
        if (desc_lim->nb_align != 0)
                *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);

        if (desc_lim->nb_max != 0)
                *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);

        *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
}

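/*
 * Worked example of the adjustment above: with nb_align = 32,
 * nb_max = 4096 and nb_min = 64, a request of 1000 descriptors is
 * rounded up to 1024, which already lies within [64, 4096], so 1024
 * is the final value.
 */
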
int
rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
                                 uint16_t *nb_rx_desc,
                                 uint16_t *nb_tx_desc)
{
        struct rte_eth_dev_info dev_info;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        ret = rte_eth_dev_info_get(port_id, &dev_info);
        if (ret != 0)
                return ret;

        if (nb_rx_desc != NULL)
                eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);

        if (nb_tx_desc != NULL)
                eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);

        return 0;
}

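/*
 * Usage sketch (illustrative): clamping requested ring sizes before
 * queue setup, as most DPDK examples do. "mp" is a hypothetical
 * mempool created by the application.
 *
 *	uint16_t nb_rxd = 1024, nb_txd = 1024;
 *
 *	rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
 *	rte_eth_rx_queue_setup(port_id, 0, nb_rxd, socket_id, NULL, mp);
 *	rte_eth_tx_queue_setup(port_id, 0, nb_txd, socket_id, NULL);
 */
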
int
rte_eth_dev_hairpin_capability_get(uint16_t port_id,
                                   struct rte_eth_hairpin_cap *cap)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP);
        memset(cap, 0, sizeof(*cap));
        return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap));
}

int
rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
{
        if (dev->data->rx_queue_state[queue_id] ==
            RTE_ETH_QUEUE_STATE_HAIRPIN)
                return 1;
        return 0;
}

int
rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
{
        if (dev->data->tx_queue_state[queue_id] ==
            RTE_ETH_QUEUE_STATE_HAIRPIN)
                return 1;
        return 0;
}

int
rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        if (pool == NULL)
                return -EINVAL;

        dev = &rte_eth_devices[port_id];

        if (*dev->dev_ops->pool_ops_supported == NULL)
                return 1; /* all pools are supported */

        return (*dev->dev_ops->pool_ops_supported)(dev, pool);
}

/**
 * A set of values to describe the possible states of a switch domain.
 */
enum rte_eth_switch_domain_state {
        RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
        RTE_ETH_SWITCH_DOMAIN_ALLOCATED
};

/**
 * Array of switch domains available for allocation. Array is sized to
 * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
 * ethdev ports in a single process.
 */
static struct rte_eth_dev_switch {
        enum rte_eth_switch_domain_state state;
} eth_dev_switch_domains[RTE_MAX_ETHPORTS];

int
rte_eth_switch_domain_alloc(uint16_t *domain_id)
{
        uint16_t i;

        *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (eth_dev_switch_domains[i].state ==
                        RTE_ETH_SWITCH_DOMAIN_UNUSED) {
                        eth_dev_switch_domains[i].state =
                                RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
                        *domain_id = i;
                        return 0;
                }
        }

        return -ENOSPC;
}

int
rte_eth_switch_domain_free(uint16_t domain_id)
{
        if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
                domain_id >= RTE_MAX_ETHPORTS)
                return -EINVAL;

        if (eth_dev_switch_domains[domain_id].state !=
                RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
                return -EINVAL;

        eth_dev_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;

        return 0;
}

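/*
 * Driver-side usage sketch (illustrative): a PF driver allocating a
 * switch domain for itself and its representors during probe, and
 * releasing it on removal.
 *
 *	uint16_t domain_id;
 *
 *	if (rte_eth_switch_domain_alloc(&domain_id) != 0)
 *		return -ENOSPC;
 *	...
 *	rte_eth_switch_domain_free(domain_id);
 */
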
static int
eth_dev_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
{
        int state;
        struct rte_kvargs_pair *pair;
        char *letter;

        arglist->str = strdup(str_in);
        if (arglist->str == NULL)
                return -ENOMEM;

        letter = arglist->str;
        state = 0;
        arglist->count = 0;
        pair = &arglist->pairs[0];
        while (1) {
                switch (state) {
                case 0: /* Initial */
                        if (*letter == '=')
                                return -EINVAL;
                        else if (*letter == '\0')
                                return 0;

                        state = 1;
                        pair->key = letter;
                        /* fall-thru */

                case 1: /* Parsing key */
                        if (*letter == '=') {
                                *letter = '\0';
                                pair->value = letter + 1;
                                state = 2;
                        } else if (*letter == ',' || *letter == '\0')
                                return -EINVAL;
                        break;

                case 2: /* Parsing value */
                        if (*letter == '[')
                                state = 3;
                        else if (*letter == ',') {
                                *letter = '\0';
                                arglist->count++;
                                pair = &arglist->pairs[arglist->count];
                                state = 0;
                        } else if (*letter == '\0') {
                                letter--;
                                arglist->count++;
                                pair = &arglist->pairs[arglist->count];
                                state = 0;
                        }
                        break;

                case 3: /* Parsing list */
                        if (*letter == ']')
                                state = 2;
                        else if (*letter == '\0')
                                return -EINVAL;
                        break;
                }
                letter++;
        }
}

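/*
 * Example of what the tokeniser above produces: the devargs string
 * "representor=[0,2-3],foo=bar" is split in place into two pairs,
 * key "representor" / value "[0,2-3]" and key "foo" / value "bar".
 * The bracket state (case 3) protects the commas inside the list from
 * being treated as pair separators.
 */
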
int
rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
{
        struct rte_kvargs args;
        struct rte_kvargs_pair *pair;
        unsigned int i;
        int result = 0;

        memset(eth_da, 0, sizeof(*eth_da));

        result = eth_dev_devargs_tokenise(&args, dargs);
        if (result < 0)
                goto parse_cleanup;

        for (i = 0; i < args.count; i++) {
                pair = &args.pairs[i];
                if (strcmp("representor", pair->key) == 0) {
                        if (eth_da->type != RTE_ETH_REPRESENTOR_NONE) {
                                RTE_LOG(ERR, EAL, "duplicated representor key: %s\n",
                                        dargs);
                                result = -1;
                                goto parse_cleanup;
                        }
                        result = rte_eth_devargs_parse_representor_ports(
                                        pair->value, eth_da);
                        if (result < 0)
                                goto parse_cleanup;
                }
        }

parse_cleanup:
        if (args.str)
                free(args.str);

        return result;
}

int
rte_eth_representor_id_get(const struct rte_eth_dev *ethdev,
                           enum rte_eth_representor_type type,
                           int controller, int pf, int representor_port,
                           uint16_t *repr_id)
{
        int ret, n, i, count;
        struct rte_eth_representor_info *info = NULL;
        size_t size;

        if (type == RTE_ETH_REPRESENTOR_NONE)
                return 0;
        if (repr_id == NULL)
                return -EINVAL;

        /* Get PMD representor range info. */
        ret = rte_eth_representor_info_get(ethdev->data->port_id, NULL);
        if (ret == -ENOTSUP && type == RTE_ETH_REPRESENTOR_VF &&
            controller == -1 && pf == -1) {
                /* Direct mapping for legacy VF representor. */
                *repr_id = representor_port;
                return 0;
        } else if (ret < 0) {
                return ret;
        }
        n = ret;
        size = sizeof(*info) + n * sizeof(info->ranges[0]);
        info = calloc(1, size);
        if (info == NULL)
                return -ENOMEM;
        ret = rte_eth_representor_info_get(ethdev->data->port_id, info);
        if (ret < 0)
                goto out;

        /* Default controller and pf to caller. */
        if (controller == -1)
                controller = info->controller;
        if (pf == -1)
                pf = info->pf;

        /* Locate representor ID. */
        ret = -ENOENT;
        for (i = 0; i < n; ++i) {
                if (info->ranges[i].type != type)
                        continue;
                if (info->ranges[i].controller != controller)
                        continue;
                if (info->ranges[i].id_end < info->ranges[i].id_base) {
                        RTE_LOG(WARNING, EAL, "Port %hu invalid representor ID Range %u - %u, entry %d\n",
                                ethdev->data->port_id, info->ranges[i].id_base,
                                info->ranges[i].id_end, i);
                        continue;
                }
                count = info->ranges[i].id_end - info->ranges[i].id_base + 1;
                switch (info->ranges[i].type) {
                case RTE_ETH_REPRESENTOR_PF:
                        if (pf < info->ranges[i].pf ||
                            pf >= info->ranges[i].pf + count)
                                continue;
                        *repr_id = info->ranges[i].id_base +
                                   (pf - info->ranges[i].pf);
                        ret = 0;
                        goto out;
                case RTE_ETH_REPRESENTOR_VF:
                        if (info->ranges[i].pf != pf)
                                continue;
                        if (representor_port < info->ranges[i].vf ||
                            representor_port >= info->ranges[i].vf + count)
                                continue;
                        *repr_id = info->ranges[i].id_base +
                                   (representor_port - info->ranges[i].vf);
                        ret = 0;
                        goto out;
                case RTE_ETH_REPRESENTOR_SF:
                        if (info->ranges[i].pf != pf)
                                continue;
                        if (representor_port < info->ranges[i].sf ||
                            representor_port >= info->ranges[i].sf + count)
                                continue;
                        *repr_id = info->ranges[i].id_base +
                                   (representor_port - info->ranges[i].sf);
                        ret = 0;
                        goto out;
                default:
                        break;
                }
        }
out:
        free(info);
        return ret;
}

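/*
 * Worked example of the range lookup above: a PMD reports one VF range
 * with pf = 0, vf = 0, id_base = 5 and id_end = 8. The range then
 * covers four representors, and asking for VF 2 on PF 0 yields
 * repr_id = 5 + (2 - 0) = 7.
 */
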
static int
eth_dev_handle_port_list(const char *cmd __rte_unused,
                const char *params __rte_unused,
                struct rte_tel_data *d)
{
        int port_id;

        rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
        RTE_ETH_FOREACH_DEV(port_id)
                rte_tel_data_add_array_int(d, port_id);
        return 0;
}

static void
eth_dev_add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats,
                const char *stat_name)
{
        int q;
        struct rte_tel_data *q_data = rte_tel_data_alloc();

        if (q_data == NULL)
                return;
        rte_tel_data_start_array(q_data, RTE_TEL_U64_VAL);
        for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++)
                rte_tel_data_add_array_u64(q_data, q_stats[q]);
        rte_tel_data_add_dict_container(d, stat_name, q_data, 0);
}

#define ADD_DICT_STAT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s)

static int
eth_dev_handle_port_stats(const char *cmd __rte_unused,
                const char *params,
                struct rte_tel_data *d)
{
        struct rte_eth_stats stats;
        int port_id, ret;

        if (params == NULL || strlen(params) == 0 || !isdigit(*params))
                return -1;

        port_id = atoi(params);
        if (!rte_eth_dev_is_valid_port(port_id))
                return -1;

        ret = rte_eth_stats_get(port_id, &stats);
        if (ret < 0)
                return -1;

        rte_tel_data_start_dict(d);
        ADD_DICT_STAT(stats, ipackets);
        ADD_DICT_STAT(stats, opackets);
        ADD_DICT_STAT(stats, ibytes);
        ADD_DICT_STAT(stats, obytes);
        ADD_DICT_STAT(stats, imissed);
        ADD_DICT_STAT(stats, ierrors);
        ADD_DICT_STAT(stats, oerrors);
        ADD_DICT_STAT(stats, rx_nombuf);
        eth_dev_add_port_queue_stats(d, stats.q_ipackets, "q_ipackets");
        eth_dev_add_port_queue_stats(d, stats.q_opackets, "q_opackets");
        eth_dev_add_port_queue_stats(d, stats.q_ibytes, "q_ibytes");
        eth_dev_add_port_queue_stats(d, stats.q_obytes, "q_obytes");
        eth_dev_add_port_queue_stats(d, stats.q_errors, "q_errors");

        return 0;
}

static int
eth_dev_handle_port_xstats(const char *cmd __rte_unused,
                const char *params,
                struct rte_tel_data *d)
{
        struct rte_eth_xstat *eth_xstats;
        struct rte_eth_xstat_name *xstat_names;
        int port_id, num_xstats;
        int i, ret;
        char *end_param;

        if (params == NULL || strlen(params) == 0 || !isdigit(*params))
                return -1;

        port_id = strtoul(params, &end_param, 0);
        if (*end_param != '\0')
                RTE_ETHDEV_LOG(NOTICE,
                        "Extra parameters passed to ethdev telemetry command, ignoring\n");
        if (!rte_eth_dev_is_valid_port(port_id))
                return -1;

        num_xstats = rte_eth_xstats_get(port_id, NULL, 0);
        if (num_xstats < 0)
                return -1;

        /* use one malloc for both names and stats */
        eth_xstats = malloc((sizeof(struct rte_eth_xstat) +
                        sizeof(struct rte_eth_xstat_name)) * num_xstats);
        if (eth_xstats == NULL)
                return -1;
        xstat_names = (void *)&eth_xstats[num_xstats];

        ret = rte_eth_xstats_get_names(port_id, xstat_names, num_xstats);
        if (ret < 0 || ret > num_xstats) {
                free(eth_xstats);
                return -1;
        }

        ret = rte_eth_xstats_get(port_id, eth_xstats, num_xstats);
        if (ret < 0 || ret > num_xstats) {
                free(eth_xstats);
                return -1;
        }

        rte_tel_data_start_dict(d);
        for (i = 0; i < num_xstats; i++)
                rte_tel_data_add_dict_u64(d, xstat_names[i].name,
                                eth_xstats[i].value);
        free(eth_xstats);
        return 0;
}

static int
eth_dev_handle_port_link_status(const char *cmd __rte_unused,
                const char *params,
                struct rte_tel_data *d)
{
        static const char *status_str = "status";
        int ret, port_id;
        struct rte_eth_link link;
        char *end_param;

        if (params == NULL || strlen(params) == 0 || !isdigit(*params))
                return -1;

        port_id = strtoul(params, &end_param, 0);
        if (*end_param != '\0')
                RTE_ETHDEV_LOG(NOTICE,
                        "Extra parameters passed to ethdev telemetry command, ignoring\n");
        if (!rte_eth_dev_is_valid_port(port_id))
                return -1;

        ret = rte_eth_link_get_nowait(port_id, &link);
        if (ret < 0)
                return -1;

        rte_tel_data_start_dict(d);
        if (!link.link_status) {
                rte_tel_data_add_dict_string(d, status_str, "DOWN");
                return 0;
        }
        rte_tel_data_add_dict_string(d, status_str, "UP");
        rte_tel_data_add_dict_u64(d, "speed", link.link_speed);
        rte_tel_data_add_dict_string(d, "duplex",
                        (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
                                "full-duplex" : "half-duplex");
        return 0;
}

int
rte_eth_hairpin_queue_peer_update(uint16_t peer_port, uint16_t peer_queue,
                                  struct rte_hairpin_peer_info *cur_info,
                                  struct rte_hairpin_peer_info *peer_info,
                                  uint32_t direction)
{
        struct rte_eth_dev *dev;

        /* Current queue information is not mandatory, peer info is. */
        if (peer_info == NULL)
                return -EINVAL;

        /* No need to check the validity again. */
        dev = &rte_eth_devices[peer_port];
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_update,
                                -ENOTSUP);

        return (*dev->dev_ops->hairpin_queue_peer_update)(dev, peer_queue,
                                        cur_info, peer_info, direction);
}

int
rte_eth_hairpin_queue_peer_bind(uint16_t cur_port, uint16_t cur_queue,
                                struct rte_hairpin_peer_info *peer_info,
                                uint32_t direction)
{
        struct rte_eth_dev *dev;

        if (peer_info == NULL)
                return -EINVAL;

        /* No need to check the validity again. */
        dev = &rte_eth_devices[cur_port];
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_bind,
                                -ENOTSUP);

        return (*dev->dev_ops->hairpin_queue_peer_bind)(dev, cur_queue,
                                                        peer_info, direction);
}

int
rte_eth_hairpin_queue_peer_unbind(uint16_t cur_port, uint16_t cur_queue,
                                  uint32_t direction)
{
        struct rte_eth_dev *dev;

        /* No need to check the validity again. */
        dev = &rte_eth_devices[cur_port];
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_unbind,
                                -ENOTSUP);

        return (*dev->dev_ops->hairpin_queue_peer_unbind)(dev, cur_queue,
                                                          direction);
}

int
rte_eth_representor_info_get(uint16_t port_id,
                             struct rte_eth_representor_info *info)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->representor_info_get, -ENOTSUP);
        return eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev,
                                                                      info));
}

RTE_LOG_REGISTER(rte_eth_dev_logtype, lib.ethdev, INFO);

RTE_INIT(ethdev_init_telemetry)
{
        rte_telemetry_register_cmd("/ethdev/list", eth_dev_handle_port_list,
                        "Returns list of available ethdev ports. Takes no parameters");
        rte_telemetry_register_cmd("/ethdev/stats", eth_dev_handle_port_stats,
                        "Returns the common stats for a port. Parameters: int port_id");
        rte_telemetry_register_cmd("/ethdev/xstats", eth_dev_handle_port_xstats,
                        "Returns the extended stats for a port. Parameters: int port_id");
        rte_telemetry_register_cmd("/ethdev/link_status",
                        eth_dev_handle_port_link_status,
                        "Returns the link status for a port. Parameters: int port_id");
}
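
/*
 * Usage sketch (illustrative): querying the commands registered above
 * through the dpdk-telemetry.py client; parameters follow the command
 * after a comma. The output shown is abridged and hypothetical.
 *
 *	--> /ethdev/list
 *	{"/ethdev/list": [0, 1]}
 *	--> /ethdev/stats,0
 *	{"/ethdev/stats": {"ipackets": 0, "opackets": 0, ...}}
 */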