devargs: unify scratch buffer storage
[dpdk.git] / lib / librte_ethdev / rte_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_kvargs.h>
#include <rte_class.h>
#include <rte_ether.h>
#include <rte_telemetry.h>

#include "rte_ethdev_trace.h"
#include "rte_ethdev.h"
#include "ethdev_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* spinlock for eth device callbacks */
static rte_spinlock_t eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove rx callbacks */
static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove tx callbacks */
static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for shared data allocation */
static rte_spinlock_t eth_dev_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned int offset;
};

/* Shared memory between primary and secondary processes. */
static struct {
        uint64_t next_owner_id;
        rte_spinlock_t ownership_lock;
        struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
} *eth_dev_shared_data;

static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = {
        {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
        {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
        {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
        {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
        {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
        {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
        {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
        {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
                rx_nombuf)},
};

#define RTE_NB_STATS RTE_DIM(eth_dev_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_rxq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
        {"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS RTE_DIM(eth_dev_rxq_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_opackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)
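
/*
 * Illustrative sketch (not part of upstream code): how the name/offset
 * tables above are consumed. A basic stat value is read by adding the
 * recorded offset to the base of a struct rte_eth_stats; "i" and
 * "port_id" are assumed valid here.
 *
 *      struct rte_eth_stats stats;
 *      uint64_t val;
 *
 *      rte_eth_stats_get(port_id, &stats);
 *      val = *(uint64_t *)(((char *)&stats) +
 *                      eth_dev_stats_strings[i].offset);
 *      printf("%s: %"PRIu64"\n", eth_dev_stats_strings[i].name, val);
 */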

#define RTE_RX_OFFLOAD_BIT2STR(_name)   \
        { DEV_RX_OFFLOAD_##_name, #_name }

#define RTE_ETH_RX_OFFLOAD_BIT2STR(_name)       \
        { RTE_ETH_RX_OFFLOAD_##_name, #_name }

static const struct {
        uint64_t offload;
        const char *name;
} eth_dev_rx_offload_names[] = {
        RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
        RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
        RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
        RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
        RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
        RTE_RX_OFFLOAD_BIT2STR(SCATTER),
        RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
        RTE_RX_OFFLOAD_BIT2STR(SECURITY),
        RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
        RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
        RTE_ETH_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
};

#undef RTE_RX_OFFLOAD_BIT2STR
#undef RTE_ETH_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name)   \
        { DEV_TX_OFFLOAD_##_name, #_name }

static const struct {
        uint64_t offload;
        const char *name;
} eth_dev_tx_offload_names[] = {
        RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
        RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
        RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
        RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
        RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
        RTE_TX_OFFLOAD_BIT2STR(SECURITY),
        RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
};

#undef RTE_TX_OFFLOAD_BIT2STR

/**
 * The user application callback description.
 *
 * It contains the callback address to be registered by the user
 * application, a pointer to the callback parameters, and the event type.
 */
struct rte_eth_dev_callback {
        TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
        rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
        void *cb_arg;                           /**< Parameter for callback */
        void *ret_param;                        /**< Return parameter */
        enum rte_eth_event_type event;          /**< Interrupt event type */
        uint32_t active;                        /**< Callback is executing */
};
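
/*
 * Illustrative sketch (not part of upstream code): how an application
 * typically populates this list, via the public API rather than by
 * touching struct rte_eth_dev_callback directly.
 *
 *      static int
 *      on_link_change(uint16_t port_id, enum rte_eth_event_type event,
 *                      void *cb_arg, void *ret_param)
 *      {
 *              RTE_SET_USED(cb_arg);
 *              RTE_SET_USED(ret_param);
 *              printf("port %u: event %d\n", port_id, event);
 *              return 0;
 *      }
 *
 *      rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *                      on_link_change, NULL);
 */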

enum {
        STAT_QMAP_TX = 0,
        STAT_QMAP_RX
};

int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
        int ret;
        struct rte_devargs devargs;
        const char *bus_param_key;
        char *bus_str = NULL;
        char *cls_str = NULL;
        int str_size;

        memset(iter, 0, sizeof(*iter));
        memset(&devargs, 0, sizeof(devargs));

        /*
         * The devargs string may use various syntaxes:
         *   - 0000:08:00.0,representor=[1-3]
         *   - pci:0000:06:00.0,representor=[0,5]
         *   - class=eth,mac=00:11:22:33:44:55
         * A new syntax is in development (not yet supported):
         *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
         */

        /*
         * Handle a pure class filter (i.e. without any bus-level argument),
         * from the future new syntax.
         * rte_devargs_parse() does not yet support the new syntax,
         * which is why this simple case is temporarily parsed here.
         */
#define iter_anybus_str "class=eth,"
        if (strncmp(devargs_str, iter_anybus_str,
                        strlen(iter_anybus_str)) == 0) {
                iter->cls_str = devargs_str + strlen(iter_anybus_str);
                goto end;
        }

        /* Split bus, device and parameters. */
        ret = rte_devargs_parse(&devargs, devargs_str);
        if (ret != 0)
                goto error;

        /*
         * Assume parameters of the old syntax can match only at the ethdev
         * level. Extra parameters will be ignored, thanks to the "+" prefix.
         */
        str_size = strlen(devargs.args) + 2;
        cls_str = malloc(str_size);
        if (cls_str == NULL) {
                ret = -ENOMEM;
                goto error;
        }
        ret = snprintf(cls_str, str_size, "+%s", devargs.args);
        if (ret != str_size - 1) {
                ret = -EINVAL;
                goto error;
        }
        iter->cls_str = cls_str;

        iter->bus = devargs.bus;
        if (iter->bus->dev_iterate == NULL) {
                ret = -ENOTSUP;
                goto error;
        }

        /* Convert bus args to new syntax for use with new API dev_iterate. */
        if (strcmp(iter->bus->name, "vdev") == 0) {
                bus_param_key = "name";
        } else if (strcmp(iter->bus->name, "pci") == 0) {
                bus_param_key = "addr";
        } else {
                ret = -ENOTSUP;
                goto error;
        }
        str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
        bus_str = malloc(str_size);
        if (bus_str == NULL) {
                ret = -ENOMEM;
                goto error;
        }
        ret = snprintf(bus_str, str_size, "%s=%s",
                        bus_param_key, devargs.name);
        if (ret != str_size - 1) {
                ret = -EINVAL;
                goto error;
        }
        iter->bus_str = bus_str;

end:
        iter->cls = rte_class_find_by_name("eth");
        rte_devargs_reset(&devargs);
        return 0;

error:
        if (ret == -ENOTSUP)
                RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n",
                                iter->bus->name);
        rte_devargs_reset(&devargs);
        free(bus_str);
        free(cls_str);
        return ret;
}

uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
        if (iter->cls == NULL) /* invalid ethdev iterator */
                return RTE_MAX_ETHPORTS;

        do { /* loop to try all matching rte_device */
                /* If not pure ethdev filter and */
                if (iter->bus != NULL &&
                                /* not in middle of rte_eth_dev iteration, */
                                iter->class_device == NULL) {
                        /* get next rte_device to try. */
                        iter->device = iter->bus->dev_iterate(
                                        iter->device, iter->bus_str, iter);
                        if (iter->device == NULL)
                                break; /* no more rte_device candidate */
                }
                /* A device matched the bus part; now check the ethdev part. */
                iter->class_device = iter->cls->dev_iterate(
                                iter->class_device, iter->cls_str, iter);
                if (iter->class_device != NULL)
                        return eth_dev_to_id(iter->class_device); /* match */
        } while (iter->bus != NULL); /* need to try next rte_device */

        /* No more ethdev port to iterate. */
        rte_eth_iterator_cleanup(iter);
        return RTE_MAX_ETHPORTS;
}

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
        if (iter->bus_str == NULL)
                return; /* nothing to free in pure class filter */
        free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
        free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
        memset(iter, 0, sizeof(*iter));
}
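
/*
 * Usage sketch (illustrative, not part of upstream code): iterating over
 * the ports matching a devargs string with the init/next/cleanup trio
 * above. The public header also offers the RTE_ETH_FOREACH_MATCHING_DEV()
 * wrapper around the same calls. Error handling is elided.
 *
 *      struct rte_dev_iterator iter;
 *      uint16_t id;
 *
 *      if (rte_eth_iterator_init(&iter, "class=eth,mac=00:11:22:33:44:55") == 0) {
 *              for (id = rte_eth_iterator_next(&iter);
 *                   id != RTE_MAX_ETHPORTS;
 *                   id = rte_eth_iterator_next(&iter))
 *                      printf("matched port %u\n", id);
 *      }
 *
 * rte_eth_iterator_next() cleans the iterator up by itself once it is
 * exhausted; an explicit rte_eth_iterator_cleanup() call is only needed
 * when breaking out of the loop early.
 */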

uint16_t
rte_eth_find_next(uint16_t port_id)
{
        while (port_id < RTE_MAX_ETHPORTS &&
                        rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
                port_id++;

        if (port_id >= RTE_MAX_ETHPORTS)
                return RTE_MAX_ETHPORTS;

        return port_id;
}

/*
 * Macro to iterate over all valid ports for internal usage.
 * Note: RTE_ETH_FOREACH_DEV is different because it skips owned ports.
 */
#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
        for (port_id = rte_eth_find_next(0); \
             port_id < RTE_MAX_ETHPORTS; \
             port_id = rte_eth_find_next(port_id + 1))

uint16_t
rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
{
        port_id = rte_eth_find_next(port_id);
        while (port_id < RTE_MAX_ETHPORTS &&
                        rte_eth_devices[port_id].device != parent)
                port_id = rte_eth_find_next(port_id + 1);

        return port_id;
}

uint16_t
rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
        return rte_eth_find_next_of(port_id,
                        rte_eth_devices[ref_port_id].device);
}
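
/*
 * Illustrative sketch (not part of upstream code): walking all ports that
 * share the same underlying rte_device as "ref_port_id", e.g. the
 * representors created on top of one PCI device. The public header also
 * provides RTE_ETH_FOREACH_DEV_SIBLING/RTE_ETH_FOREACH_DEV_OF wrappers
 * around these helpers.
 *
 *      uint16_t sib;
 *
 *      for (sib = rte_eth_find_next_sibling(0, ref_port_id);
 *           sib < RTE_MAX_ETHPORTS;
 *           sib = rte_eth_find_next_sibling(sib + 1, ref_port_id))
 *              printf("port %u shares a device with port %u\n",
 *                              sib, ref_port_id);
 */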

static void
eth_dev_shared_data_prepare(void)
{
        const unsigned int flags = 0;
        const struct rte_memzone *mz;

        rte_spinlock_lock(&eth_dev_shared_data_lock);

        if (eth_dev_shared_data == NULL) {
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        /* Allocate port data and ownership shared memory. */
                        mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
                                        sizeof(*eth_dev_shared_data),
                                        rte_socket_id(), flags);
                } else
                        mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
                if (mz == NULL)
                        rte_panic("Cannot allocate ethdev shared data\n");

                eth_dev_shared_data = mz->addr;
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        eth_dev_shared_data->next_owner_id =
                                        RTE_ETH_DEV_NO_OWNER + 1;
                        rte_spinlock_init(&eth_dev_shared_data->ownership_lock);
                        memset(eth_dev_shared_data->data, 0,
                               sizeof(eth_dev_shared_data->data));
                }
        }

        rte_spinlock_unlock(&eth_dev_shared_data_lock);
}

static bool
eth_dev_is_allocated(const struct rte_eth_dev *ethdev)
{
        return ethdev->data->name[0] != '\0';
}

static struct rte_eth_dev *
eth_dev_allocated(const char *name)
{
        uint16_t i;

        RTE_BUILD_BUG_ON(RTE_MAX_ETHPORTS >= UINT16_MAX);

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (rte_eth_devices[i].data != NULL &&
                    strcmp(rte_eth_devices[i].data->name, name) == 0)
                        return &rte_eth_devices[i];
        }
        return NULL;
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
        struct rte_eth_dev *ethdev;

        eth_dev_shared_data_prepare();

        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        ethdev = eth_dev_allocated(name);

        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

        return ethdev;
}

static uint16_t
eth_dev_find_free_port(void)
{
        uint16_t i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                /* Using shared name field to find a free port. */
                if (eth_dev_shared_data->data[i].name[0] == '\0') {
                        RTE_ASSERT(rte_eth_devices[i].state ==
                                   RTE_ETH_DEV_UNUSED);
                        return i;
                }
        }
        return RTE_MAX_ETHPORTS;
}

static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
{
        struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

        eth_dev->data = &eth_dev_shared_data->data[port_id];

        return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
        uint16_t port_id;
        struct rte_eth_dev *eth_dev = NULL;
        size_t name_len;

        name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
        if (name_len == 0) {
                RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n");
                return NULL;
        }

        if (name_len >= RTE_ETH_NAME_MAX_LEN) {
                RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n");
                return NULL;
        }

        eth_dev_shared_data_prepare();

        /* Synchronize port creation between primary and secondary processes. */
        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        if (eth_dev_allocated(name) != NULL) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethernet device with name %s already allocated\n",
                        name);
                goto unlock;
        }

        port_id = eth_dev_find_free_port();
        if (port_id == RTE_MAX_ETHPORTS) {
                RTE_ETHDEV_LOG(ERR,
                        "Reached maximum number of Ethernet ports\n");
                goto unlock;
        }

        eth_dev = eth_dev_get(port_id);
        strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
        eth_dev->data->port_id = port_id;
        eth_dev->data->mtu = RTE_ETHER_MTU;
        pthread_mutex_init(&eth_dev->data->flow_ops_mutex, NULL);

unlock:
        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

        return eth_dev;
}

/*
 * Attach to a port already registered by the primary process, which
 * makes sure that the same device gets the same port id in both the
 * primary and secondary processes.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
        uint16_t i;
        struct rte_eth_dev *eth_dev = NULL;

        eth_dev_shared_data_prepare();

        /* Synchronize port attachment to primary port creation and release. */
        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (strcmp(eth_dev_shared_data->data[i].name, name) == 0)
                        break;
        }
        if (i == RTE_MAX_ETHPORTS) {
                RTE_ETHDEV_LOG(ERR,
                        "Device %s is not driven by the primary process\n",
                        name);
        } else {
                eth_dev = eth_dev_get(i);
                RTE_ASSERT(eth_dev->data->port_id == i);
        }

        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
        return eth_dev;
}
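
/*
 * Illustrative sketch (not part of upstream code): how a PMD typically
 * uses the allocation helpers at probe time, depending on the process
 * type. "my_pmd_ops" is a hypothetical ops table; error handling is
 * elided.
 *
 *      struct rte_eth_dev *eth_dev;
 *
 *      if (rte_eal_process_type() == RTE_PROC_PRIMARY)
 *              eth_dev = rte_eth_dev_allocate(name);
 *      else
 *              eth_dev = rte_eth_dev_attach_secondary(name);
 *      if (eth_dev == NULL)
 *              return -ENOMEM;
 *      eth_dev->dev_ops = &my_pmd_ops;
 */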

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
        if (eth_dev == NULL)
                return -EINVAL;

        eth_dev_shared_data_prepare();

        if (eth_dev->state != RTE_ETH_DEV_UNUSED)
                rte_eth_dev_callback_process(eth_dev,
                                RTE_ETH_EVENT_DESTROY, NULL);

        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        eth_dev->state = RTE_ETH_DEV_UNUSED;
        eth_dev->device = NULL;
        eth_dev->process_private = NULL;
        eth_dev->intr_handle = NULL;
        eth_dev->rx_pkt_burst = NULL;
        eth_dev->tx_pkt_burst = NULL;
        eth_dev->tx_pkt_prepare = NULL;
        eth_dev->rx_queue_count = NULL;
        eth_dev->rx_descriptor_done = NULL;
        eth_dev->rx_descriptor_status = NULL;
        eth_dev->tx_descriptor_status = NULL;
        eth_dev->dev_ops = NULL;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                rte_free(eth_dev->data->rx_queues);
                rte_free(eth_dev->data->tx_queues);
                rte_free(eth_dev->data->mac_addrs);
                rte_free(eth_dev->data->hash_mac_addrs);
                rte_free(eth_dev->data->dev_private);
                pthread_mutex_destroy(&eth_dev->data->flow_ops_mutex);
                memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
        }

        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

        return 0;
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
        if (port_id >= RTE_MAX_ETHPORTS ||
            (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
                return 0;
        else
                return 1;
}

static int
eth_is_valid_owner_id(uint64_t owner_id)
{
        if (owner_id == RTE_ETH_DEV_NO_OWNER ||
            eth_dev_shared_data->next_owner_id <= owner_id)
                return 0;
        return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
        port_id = rte_eth_find_next(port_id);
        while (port_id < RTE_MAX_ETHPORTS &&
                        rte_eth_devices[port_id].data->owner.id != owner_id)
                port_id = rte_eth_find_next(port_id + 1);

        return port_id;
}

int
rte_eth_dev_owner_new(uint64_t *owner_id)
{
        eth_dev_shared_data_prepare();

        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        *owner_id = eth_dev_shared_data->next_owner_id++;

        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
        return 0;
}

static int
eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
                       const struct rte_eth_dev_owner *new_owner)
{
        struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
        struct rte_eth_dev_owner *port_owner;

        if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
                RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
                        port_id);
                return -ENODEV;
        }

        if (!eth_is_valid_owner_id(new_owner->id) &&
            !eth_is_valid_owner_id(old_owner_id)) {
                RTE_ETHDEV_LOG(ERR,
                        "Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
                       old_owner_id, new_owner->id);
                return -EINVAL;
        }

        port_owner = &rte_eth_devices[port_id].data->owner;
        if (port_owner->id != old_owner_id) {
                RTE_ETHDEV_LOG(ERR,
                        "Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
                        port_id, port_owner->name, port_owner->id);
                return -EPERM;
        }

        /* cannot truncate (same structure) */
        strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);

        port_owner->id = new_owner->id;

        RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
                port_id, new_owner->name, new_owner->id);

        return 0;
}

int
rte_eth_dev_owner_set(const uint16_t port_id,
                      const struct rte_eth_dev_owner *owner)
{
        int ret;

        eth_dev_shared_data_prepare();

        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
        return ret;
}

int
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
        const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
                        {.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
        int ret;

        eth_dev_shared_data_prepare();

        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        ret = eth_dev_owner_set(port_id, owner_id, &new_owner);

        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
        return ret;
}

int
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
        uint16_t port_id;
        int ret = 0;

        eth_dev_shared_data_prepare();

        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        if (eth_is_valid_owner_id(owner_id)) {
                for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
                        if (rte_eth_devices[port_id].data->owner.id == owner_id)
                                memset(&rte_eth_devices[port_id].data->owner, 0,
                                       sizeof(struct rte_eth_dev_owner));
                RTE_ETHDEV_LOG(NOTICE,
                        "All port owners owned by %016"PRIx64" identifier have been removed\n",
                        owner_id);
        } else {
                RTE_ETHDEV_LOG(ERR,
                               "Invalid owner id=%016"PRIx64"\n",
                               owner_id);
                ret = -EINVAL;
        }

        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

        return ret;
}

int
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
        int ret = 0;
        struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];

        eth_dev_shared_data_prepare();

        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
                RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
                        port_id);
                ret = -ENODEV;
        } else {
                rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
        }

        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
        return ret;
}
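
/*
 * Illustrative sketch (not part of upstream code): the typical ownership
 * flow for a component that wants exclusive control of a port. An owner
 * id is taken from the shared counter, attached to the port, and later
 * released.
 *
 *      struct rte_eth_dev_owner owner = { .name = "my_app" };
 *      uint64_t owner_id;
 *
 *      if (rte_eth_dev_owner_new(&owner_id) == 0) {
 *              owner.id = owner_id;
 *              if (rte_eth_dev_owner_set(port_id, &owner) == 0) {
 *                      ...
 *                      rte_eth_dev_owner_unset(port_id, owner_id);
 *              }
 *      }
 *
 * Ports with an owner are skipped by RTE_ETH_FOREACH_DEV, which shields
 * them from generic applications iterating over all ports.
 */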

int
rte_eth_dev_socket_id(uint16_t port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
        return rte_eth_devices[port_id].data->numa_node;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
        return rte_eth_devices[port_id].security_ctx;
}

uint16_t
rte_eth_dev_count_avail(void)
{
        uint16_t p;
        uint16_t count;

        count = 0;

        RTE_ETH_FOREACH_DEV(p)
                count++;

        return count;
}

uint16_t
rte_eth_dev_count_total(void)
{
        uint16_t port, count = 0;

        RTE_ETH_FOREACH_VALID_DEV(port)
                count++;

        return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
        char *tmp;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        if (name == NULL) {
                RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
                return -EINVAL;
        }

        /* We shouldn't check 'rte_eth_devices[i].data' here,
         * because it might be overwritten by a VDEV PMD. */
        tmp = eth_dev_shared_data->data[port_id].name;
        strcpy(name, tmp);
        return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
        uint16_t pid;

        if (name == NULL) {
                RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
                return -EINVAL;
        }

        RTE_ETH_FOREACH_VALID_DEV(pid)
                if (!strcmp(name, eth_dev_shared_data->data[pid].name)) {
                        *port_id = pid;
                        return 0;
                }

        return -ENODEV;
}

static int
eth_err(uint16_t port_id, int ret)
{
        if (ret == 0)
                return 0;
        if (rte_eth_dev_is_removed(port_id))
                return -EIO;
        return ret;
}

static int
eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_rx_queues;
        void **rxq;
        unsigned int i;

        if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
                                sizeof(dev->data->rx_queues[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (dev->data->rx_queues == NULL) {
                        dev->data->nb_rx_queues = 0;
                        return -ENOMEM;
                }
        } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);
                rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (rxq == NULL)
                        return -ENOMEM;
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(rxq + old_nb_queues, 0,
                                sizeof(rxq[0]) * new_qs);
                }

                dev->data->rx_queues = rxq;

        } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);

                rte_free(dev->data->rx_queues);
                dev->data->rx_queues = NULL;
        }
        dev->data->nb_rx_queues = nb_queues;
        return 0;
}

static int
eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        uint16_t port_id;

        if (rx_queue_id >= dev->data->nb_rx_queues) {
                port_id = dev->data->port_id;
                RTE_ETHDEV_LOG(ERR,
                               "Invalid Rx queue_id=%u of device with port_id=%u\n",
                               rx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->rx_queues[rx_queue_id] == NULL) {
                port_id = dev->data->port_id;
                RTE_ETHDEV_LOG(ERR,
                               "Queue %u of device with port_id=%u has not been setup\n",
                               rx_queue_id, port_id);
                return -EINVAL;
        }

        return 0;
}

static int
eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
        uint16_t port_id;

        if (tx_queue_id >= dev->data->nb_tx_queues) {
                port_id = dev->data->port_id;
                RTE_ETHDEV_LOG(ERR,
                               "Invalid Tx queue_id=%u of device with port_id=%u\n",
                               tx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->tx_queues[tx_queue_id] == NULL) {
                port_id = dev->data->port_id;
                RTE_ETHDEV_LOG(ERR,
                               "Queue %u of device with port_id=%u has not been setup\n",
                               tx_queue_id, port_id);
                return -EINVAL;
        }

        return 0;
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        if (!dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be started before starting any queue\n",
                        port_id);
                return -EINVAL;
        }

        ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
        if (ret != 0)
                return ret;

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

        if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        rx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->rx_queue_start(dev,
                                                             rx_queue_id));
}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];

        ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
        if (ret != 0)
                return ret;

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

        if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        rx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
}

int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        if (!dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be started before starting any queue\n",
                        port_id);
                return -EINVAL;
        }

        ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
        if (ret != 0)
                return ret;

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

        if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        tx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];

        ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
        if (ret != 0)
                return ret;

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

        if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        tx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
}

static int
eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_tx_queues;
        void **txq;
        unsigned int i;

        if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
                                                   sizeof(dev->data->tx_queues[0]) * nb_queues,
                                                   RTE_CACHE_LINE_SIZE);
                if (dev->data->tx_queues == NULL) {
                        dev->data->nb_tx_queues = 0;
                        return -ENOMEM;
                }
        } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);
                txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
                                  RTE_CACHE_LINE_SIZE);
                if (txq == NULL)
                        return -ENOMEM;
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(txq + old_nb_queues, 0,
                               sizeof(txq[0]) * new_qs);
                }

                dev->data->tx_queues = txq;

        } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);

                rte_free(dev->data->tx_queues);
                dev->data->tx_queues = NULL;
        }
        dev->data->nb_tx_queues = nb_queues;
        return 0;
}

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
        switch (speed) {
        case ETH_SPEED_NUM_10M:
                return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
        case ETH_SPEED_NUM_100M:
                return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
        case ETH_SPEED_NUM_1G:
                return ETH_LINK_SPEED_1G;
        case ETH_SPEED_NUM_2_5G:
                return ETH_LINK_SPEED_2_5G;
        case ETH_SPEED_NUM_5G:
                return ETH_LINK_SPEED_5G;
        case ETH_SPEED_NUM_10G:
                return ETH_LINK_SPEED_10G;
        case ETH_SPEED_NUM_20G:
                return ETH_LINK_SPEED_20G;
        case ETH_SPEED_NUM_25G:
                return ETH_LINK_SPEED_25G;
        case ETH_SPEED_NUM_40G:
                return ETH_LINK_SPEED_40G;
        case ETH_SPEED_NUM_50G:
                return ETH_LINK_SPEED_50G;
        case ETH_SPEED_NUM_56G:
                return ETH_LINK_SPEED_56G;
        case ETH_SPEED_NUM_100G:
                return ETH_LINK_SPEED_100G;
        case ETH_SPEED_NUM_200G:
                return ETH_LINK_SPEED_200G;
        default:
                return 0;
        }
}
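
/*
 * Illustrative sketch (not part of upstream code): converting a numeric
 * speed into a link_speeds bit, e.g. to force a fixed 10G full-duplex
 * link in the port configuration.
 *
 *      struct rte_eth_conf conf = { 0 };
 *
 *      conf.link_speeds = ETH_LINK_SPEED_FIXED |
 *                      rte_eth_speed_bitflag(ETH_SPEED_NUM_10G, 1);
 *
 * A zero return value means the speed number is unknown, so callers
 * should check it before OR-ing it into link_speeds.
 */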

const char *
rte_eth_dev_rx_offload_name(uint64_t offload)
{
        const char *name = "UNKNOWN";
        unsigned int i;

        for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i) {
                if (offload == eth_dev_rx_offload_names[i].offload) {
                        name = eth_dev_rx_offload_names[i].name;
                        break;
                }
        }

        return name;
}

const char *
rte_eth_dev_tx_offload_name(uint64_t offload)
{
        const char *name = "UNKNOWN";
        unsigned int i;

        for (i = 0; i < RTE_DIM(eth_dev_tx_offload_names); ++i) {
                if (offload == eth_dev_tx_offload_names[i].offload) {
                        name = eth_dev_tx_offload_names[i].name;
                        break;
                }
        }

        return name;
}
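
/*
 * Illustrative sketch (not part of upstream code): these helpers take a
 * single-bit offload value, so a capability mask has to be decomposed
 * bit by bit before printing, e.g.:
 *
 *      uint64_t capa = dev_info.rx_offload_capa;
 *
 *      while (capa != 0) {
 *              uint64_t bit = capa & -capa;    // isolate the lowest set bit
 *              printf("%s\n", rte_eth_dev_rx_offload_name(bit));
 *              capa &= ~bit;
 *      }
 */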

static inline int
eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
                   uint32_t max_rx_pkt_len, uint32_t dev_info_size)
{
        int ret = 0;

        if (dev_info_size == 0) {
                if (config_size != max_rx_pkt_len) {
                        RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
                                       " %u != %u is not allowed\n",
                                       port_id, config_size, max_rx_pkt_len);
                        ret = -EINVAL;
                }
        } else if (config_size > dev_info_size) {
                RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
                               "> max allowed value %u\n", port_id, config_size,
                               dev_info_size);
                ret = -EINVAL;
        } else if (config_size < RTE_ETHER_MIN_LEN) {
                RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
                               "< min allowed value %u\n", port_id, config_size,
                               (unsigned int)RTE_ETHER_MIN_LEN);
                ret = -EINVAL;
        }
        return ret;
}

/*
 * Validate offloads that are requested through rte_eth_dev_configure against
 * the offloads successfully set by the Ethernet device.
 *
 * @param port_id
 *   The port identifier of the Ethernet device.
 * @param req_offloads
 *   The offloads that have been requested through `rte_eth_dev_configure`.
 * @param set_offloads
 *   The offloads successfully set by the Ethernet device.
 * @param offload_type
 *   The offload type, i.e. the Rx/Tx string.
 * @param offload_name
 *   The function that prints the offload name.
 * @return
 *   - (0) if validation successful.
 *   - (-EINVAL) if a requested offload has been silently disabled.
 */
static int
eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads,
                  uint64_t set_offloads, const char *offload_type,
                  const char *(*offload_name)(uint64_t))
{
        uint64_t offloads_diff = req_offloads ^ set_offloads;
        uint64_t offload;
        int ret = 0;

        while (offloads_diff != 0) {
                /* Check if any offload is requested but not enabled. */
                offload = 1ULL << __builtin_ctzll(offloads_diff);
                if (offload & req_offloads) {
                        RTE_ETHDEV_LOG(ERR,
                                "Port %u failed to enable %s offload %s\n",
                                port_id, offload_type, offload_name(offload));
                        ret = -EINVAL;
                }

                /* Check if any offload is enabled although not requested. */
                if (offload & set_offloads) {
                        RTE_ETHDEV_LOG(DEBUG,
                                "Port %u %s offload %s is not requested but enabled\n",
                                port_id, offload_type, offload_name(offload));
                }

                offloads_diff &= ~offload;
        }

        return ret;
}
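
/*
 * Worked example (illustrative): with req_offloads = 0b0110 and
 * set_offloads = 0b0011, offloads_diff = 0b0101. Bit 0 is set-only, so
 * it is merely logged at DEBUG level; bit 2 is requested-only, meaning
 * the driver silently dropped it, so the function returns -EINVAL.
 */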

int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                      const struct rte_eth_conf *dev_conf)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        struct rte_eth_conf orig_conf;
        uint16_t overhead_len;
        int diag;
        int ret;
        uint16_t old_mtu;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

        if (dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be stopped to allow configuration\n",
                        port_id);
                return -EBUSY;
        }

        /* Store the original config, as a rollback is required on failure */
        memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));

        /*
         * Copy the dev_conf parameter into the dev structure.
         * rte_eth_dev_info_get() requires dev_conf; copy it before the
         * dev_info get call.
         */
        if (dev_conf != &dev->data->dev_conf)
                memcpy(&dev->data->dev_conf, dev_conf,
                       sizeof(dev->data->dev_conf));

        /* Backup mtu for rollback */
        old_mtu = dev->data->mtu;

        ret = rte_eth_dev_info_get(port_id, &dev_info);
        if (ret != 0)
                goto rollback;

        /* Get the real Ethernet overhead length */
        if (dev_info.max_mtu != UINT16_MAX &&
            dev_info.max_rx_pktlen > dev_info.max_mtu)
                overhead_len = dev_info.max_rx_pktlen - dev_info.max_mtu;
        else
                overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

        /* If the number of queues specified by the application for both Rx
         * and Tx is zero, use driver preferred values. This cannot be done
         * individually, as it is valid for either Tx or Rx (but not both)
         * to be zero. If the driver does not provide any preferred values,
         * fall back on EAL defaults.
         */
1344         if (nb_rx_q == 0 && nb_tx_q == 0) {
1345                 nb_rx_q = dev_info.default_rxportconf.nb_queues;
1346                 if (nb_rx_q == 0)
1347                         nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
1348                 nb_tx_q = dev_info.default_txportconf.nb_queues;
1349                 if (nb_tx_q == 0)
1350                         nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
1351         }
1352
1353         if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
1354                 RTE_ETHDEV_LOG(ERR,
1355                         "Number of RX queues requested (%u) is greater than max supported(%d)\n",
1356                         nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
1357                 ret = -EINVAL;
1358                 goto rollback;
1359         }
1360
1361         if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
1362                 RTE_ETHDEV_LOG(ERR,
1363                         "Number of TX queues requested (%u) is greater than max supported(%d)\n",
1364                         nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
1365                 ret = -EINVAL;
1366                 goto rollback;
1367         }
1368
1369         /*
1370          * Check that the numbers of RX and TX queues are not greater
1371          * than the maximum number of RX and TX queues supported by the
1372          * configured device.
1373          */
1374         if (nb_rx_q > dev_info.max_rx_queues) {
1375                 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
1376                         port_id, nb_rx_q, dev_info.max_rx_queues);
1377                 ret = -EINVAL;
1378                 goto rollback;
1379         }
1380
1381         if (nb_tx_q > dev_info.max_tx_queues) {
1382                 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
1383                         port_id, nb_tx_q, dev_info.max_tx_queues);
1384                 ret = -EINVAL;
1385                 goto rollback;
1386         }
1387
1388         /* Check that the device supports requested interrupts */
1389         if ((dev_conf->intr_conf.lsc == 1) &&
1390                         (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
1391                 RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
1392                         dev->device->driver->name);
1393                 ret = -EINVAL;
1394                 goto rollback;
1395         }
1396         if ((dev_conf->intr_conf.rmv == 1) &&
1397                         (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
1398                 RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
1399                         dev->device->driver->name);
1400                 ret = -EINVAL;
1401                 goto rollback;
1402         }
1403
1404         /*
1405          * If jumbo frames are enabled, check that the maximum RX packet
1406          * length is supported by the configured device.
1407          */
1408         if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
1409                 if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) {
1410                         RTE_ETHDEV_LOG(ERR,
1411                                 "Ethdev port_id=%u max_rx_pkt_len %u > max valid value %u\n",
1412                                 port_id, dev_conf->rxmode.max_rx_pkt_len,
1413                                 dev_info.max_rx_pktlen);
1414                         ret = -EINVAL;
1415                         goto rollback;
1416                 } else if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN) {
1417                         RTE_ETHDEV_LOG(ERR,
1418                                 "Ethdev port_id=%u max_rx_pkt_len %u < min valid value %u\n",
1419                                 port_id, dev_conf->rxmode.max_rx_pkt_len,
1420                                 (unsigned int)RTE_ETHER_MIN_LEN);
1421                         ret = -EINVAL;
1422                         goto rollback;
1423                 }
1424
1425                 /* Scale the MTU size to adapt max_rx_pkt_len */
1426                 dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
1427                                 overhead_len;
1428         } else {
1429                 uint16_t pktlen = dev_conf->rxmode.max_rx_pkt_len;
1430                 if (pktlen < RTE_ETHER_MIN_MTU + overhead_len ||
1431                     pktlen > RTE_ETHER_MTU + overhead_len)
1432                         /* Use default value */
1433                         dev->data->dev_conf.rxmode.max_rx_pkt_len =
1434                                                 RTE_ETHER_MTU + overhead_len;
1435         }
1436
1437         /*
1438          * If LRO is enabled, check that the maximum aggregated packet
1439          * size is supported by the configured device.
1440          */
1441         if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
1442                 if (dev_conf->rxmode.max_lro_pkt_size == 0)
1443                         dev->data->dev_conf.rxmode.max_lro_pkt_size =
1444                                 dev->data->dev_conf.rxmode.max_rx_pkt_len;
1445                 ret = eth_dev_check_lro_pkt_size(port_id,
1446                                 dev->data->dev_conf.rxmode.max_lro_pkt_size,
1447                                 dev->data->dev_conf.rxmode.max_rx_pkt_len,
1448                                 dev_info.max_lro_pkt_size);
1449                 if (ret != 0)
1450                         goto rollback;
1451         }
1452
1453         /* Any requested offloading must be within its device capabilities */
1454         if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
1455              dev_conf->rxmode.offloads) {
1456                 RTE_ETHDEV_LOG(ERR,
1457                         "Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
1458                         "capabilities 0x%"PRIx64" in %s()\n",
1459                         port_id, dev_conf->rxmode.offloads,
1460                         dev_info.rx_offload_capa,
1461                         __func__);
1462                 ret = -EINVAL;
1463                 goto rollback;
1464         }
1465         if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
1466              dev_conf->txmode.offloads) {
1467                 RTE_ETHDEV_LOG(ERR,
1468                         "Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
1469                         "capabilities 0x%"PRIx64" in %s()\n",
1470                         port_id, dev_conf->txmode.offloads,
1471                         dev_info.tx_offload_capa,
1472                         __func__);
1473                 ret = -EINVAL;
1474                 goto rollback;
1475         }
1476
1477         dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
1478                 rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);
1479
1480         /* Check that device supports requested rss hash functions. */
1481         if ((dev_info.flow_type_rss_offloads |
1482              dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
1483             dev_info.flow_type_rss_offloads) {
1484                 RTE_ETHDEV_LOG(ERR,
1485                         "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
1486                         port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
1487                         dev_info.flow_type_rss_offloads);
1488                 ret = -EINVAL;
1489                 goto rollback;
1490         }
1491
1492         /* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
1493         if (((dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) == 0) &&
1494             (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
1495                 RTE_ETHDEV_LOG(ERR,
1496                         "Ethdev port_id=%u config invalid: Rx mq_mode without RSS but %s offload is requested\n",
1497                         port_id,
1498                         rte_eth_dev_rx_offload_name(DEV_RX_OFFLOAD_RSS_HASH));
1499                 ret = -EINVAL;
1500                 goto rollback;
1501         }
1502
1503         /*
1504          * Setup new number of RX/TX queues and reconfigure device.
1505          */
1506         diag = eth_dev_rx_queue_config(dev, nb_rx_q);
1507         if (diag != 0) {
1508                 RTE_ETHDEV_LOG(ERR,
1509                         "Port%u eth_dev_rx_queue_config = %d\n",
1510                         port_id, diag);
1511                 ret = diag;
1512                 goto rollback;
1513         }
1514
1515         diag = eth_dev_tx_queue_config(dev, nb_tx_q);
1516         if (diag != 0) {
1517                 RTE_ETHDEV_LOG(ERR,
1518                         "Port%u eth_dev_tx_queue_config = %d\n",
1519                         port_id, diag);
1520                 eth_dev_rx_queue_config(dev, 0);
1521                 ret = diag;
1522                 goto rollback;
1523         }
1524
1525         diag = (*dev->dev_ops->dev_configure)(dev);
1526         if (diag != 0) {
1527                 RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
1528                         port_id, diag);
1529                 ret = eth_err(port_id, diag);
1530                 goto reset_queues;
1531         }
1532
1533         /* Initialize Rx profiling if enabled at compilation time. */
1534         diag = __rte_eth_dev_profile_init(port_id, dev);
1535         if (diag != 0) {
1536                 RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
1537                         port_id, diag);
1538                 ret = eth_err(port_id, diag);
1539                 goto reset_queues;
1540         }
1541
1542         /* Validate Rx offloads. */
1543         diag = eth_dev_validate_offloads(port_id,
1544                         dev_conf->rxmode.offloads,
1545                         dev->data->dev_conf.rxmode.offloads, "Rx",
1546                         rte_eth_dev_rx_offload_name);
1547         if (diag != 0) {
1548                 ret = diag;
1549                 goto reset_queues;
1550         }
1551
1552         /* Validate Tx offloads. */
1553         diag = eth_dev_validate_offloads(port_id,
1554                         dev_conf->txmode.offloads,
1555                         dev->data->dev_conf.txmode.offloads, "Tx",
1556                         rte_eth_dev_tx_offload_name);
1557         if (diag != 0) {
1558                 ret = diag;
1559                 goto reset_queues;
1560         }
1561
1562         rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0);
1563         return 0;
1564 reset_queues:
1565         eth_dev_rx_queue_config(dev, 0);
1566         eth_dev_tx_queue_config(dev, 0);
1567 rollback:
1568         memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
1569         if (old_mtu != dev->data->mtu)
1570                 dev->data->mtu = old_mtu;
1571
1572         rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret);
1573         return ret;
1574 }
1575
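/*
 * Illustrative sketch, not part of ethdev itself: how an application might
 * consult rte_eth_dev_info_get() before rte_eth_dev_configure() so that the
 * requested offloads stay within the capability masks validated above. The
 * example_ names and the RTE_ETHDEV_USAGE_EXAMPLES guard are hypothetical
 * and keep the sketch out of any real build.
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static int
example_configure_port(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
        struct rte_eth_dev_info info;
        struct rte_eth_conf conf;
        int ret;

        memset(&conf, 0, sizeof(conf));

        ret = rte_eth_dev_info_get(port_id, &info);
        if (ret != 0)
                return ret;

        /* Request offloads only when the device reports the capability,
         * so the capability checks above cannot fail. */
        if (info.rx_offload_capa & DEV_RX_OFFLOAD_CHECKSUM)
                conf.rxmode.offloads |= DEV_RX_OFFLOAD_CHECKSUM;
        if (info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
                conf.txmode.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;

        return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}
#endif /* RTE_ETHDEV_USAGE_EXAMPLES */
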
1576 void
1577 rte_eth_dev_internal_reset(struct rte_eth_dev *dev)
1578 {
1579         if (dev->data->dev_started) {
1580                 RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
1581                         dev->data->port_id);
1582                 return;
1583         }
1584
1585         eth_dev_rx_queue_config(dev, 0);
1586         eth_dev_tx_queue_config(dev, 0);
1587
1588         memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
1589 }
1590
1591 static void
1592 eth_dev_mac_restore(struct rte_eth_dev *dev,
1593                         struct rte_eth_dev_info *dev_info)
1594 {
1595         struct rte_ether_addr *addr;
1596         uint16_t i;
1597         uint32_t pool = 0;
1598         uint64_t pool_mask;
1599
1600         /* replay MAC address configuration including default MAC */
1601         addr = &dev->data->mac_addrs[0];
1602         if (*dev->dev_ops->mac_addr_set != NULL)
1603                 (*dev->dev_ops->mac_addr_set)(dev, addr);
1604         else if (*dev->dev_ops->mac_addr_add != NULL)
1605                 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);
1606
1607         if (*dev->dev_ops->mac_addr_add != NULL) {
1608                 for (i = 1; i < dev_info->max_mac_addrs; i++) {
1609                         addr = &dev->data->mac_addrs[i];
1610
1611                         /* skip zero address */
1612                         if (rte_is_zero_ether_addr(addr))
1613                                 continue;
1614
1615                         pool = 0;
1616                         pool_mask = dev->data->mac_pool_sel[i];
1617
1618                         do {
1619                                 if (pool_mask & 1ULL)
1620                                         (*dev->dev_ops->mac_addr_add)(dev,
1621                                                 addr, i, pool);
1622                                 pool_mask >>= 1;
1623                                 pool++;
1624                         } while (pool_mask);
1625                 }
1626         }
1627 }
1628
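/*
 * Illustrative sketch (hypothetical example_ helper, compiled out):
 * eth_dev_mac_restore() above replays addresses recorded by
 * rte_eth_dev_mac_addr_add(); this is the application-side call that
 * populates mac_addrs[] and the per-address mac_pool_sel bitmaps whose
 * set bits the restore loop walks.
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static int
example_add_secondary_macs(uint16_t port_id, struct rte_ether_addr *macs,
                           uint32_t nb_macs, uint32_t pool)
{
        uint32_t i;
        int ret;

        for (i = 0; i < nb_macs; i++) {
                ret = rte_eth_dev_mac_addr_add(port_id, &macs[i], pool);
                if (ret != 0)
                        return ret;
        }
        return 0;
}
#endif /* RTE_ETHDEV_USAGE_EXAMPLES */
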
1629 static int
1630 eth_dev_config_restore(struct rte_eth_dev *dev,
1631                 struct rte_eth_dev_info *dev_info, uint16_t port_id)
1632 {
1633         int ret;
1634
1635         if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
1636                 eth_dev_mac_restore(dev, dev_info);
1637
1638         /* replay promiscuous configuration */
1639         /*
1640          * Invoke the driver callbacks directly: port_id is already
1641          * validated and the usual same-value check must be bypassed.
1642          */
1643         if (rte_eth_promiscuous_get(port_id) == 1 &&
1644             *dev->dev_ops->promiscuous_enable != NULL) {
1645                 ret = eth_err(port_id,
1646                               (*dev->dev_ops->promiscuous_enable)(dev));
1647                 if (ret != 0 && ret != -ENOTSUP) {
1648                         RTE_ETHDEV_LOG(ERR,
1649                                 "Failed to enable promiscuous mode for device (port %u): %s\n",
1650                                 port_id, rte_strerror(-ret));
1651                         return ret;
1652                 }
1653         } else if (rte_eth_promiscuous_get(port_id) == 0 &&
1654                    *dev->dev_ops->promiscuous_disable != NULL) {
1655                 ret = eth_err(port_id,
1656                               (*dev->dev_ops->promiscuous_disable)(dev));
1657                 if (ret != 0 && ret != -ENOTSUP) {
1658                         RTE_ETHDEV_LOG(ERR,
1659                                 "Failed to disable promiscuous mode for device (port %u): %s\n",
1660                                 port_id, rte_strerror(-ret));
1661                         return ret;
1662                 }
1663         }
1664
1665         /* replay all multicast configuration */
1666         /*
1667          * Invoke the driver callbacks directly: port_id is already
1668          * validated and the usual same-value check must be bypassed.
1669          */
1670         if (rte_eth_allmulticast_get(port_id) == 1 &&
1671             *dev->dev_ops->allmulticast_enable != NULL) {
1672                 ret = eth_err(port_id,
1673                               (*dev->dev_ops->allmulticast_enable)(dev));
1674                 if (ret != 0 && ret != -ENOTSUP) {
1675                         RTE_ETHDEV_LOG(ERR,
1676                                 "Failed to enable allmulticast mode for device (port %u): %s\n",
1677                                 port_id, rte_strerror(-ret));
1678                         return ret;
1679                 }
1680         } else if (rte_eth_allmulticast_get(port_id) == 0 &&
1681                    *dev->dev_ops->allmulticast_disable != NULL) {
1682                 ret = eth_err(port_id,
1683                               (*dev->dev_ops->allmulticast_disable)(dev));
1684                 if (ret != 0 && ret != -ENOTSUP) {
1685                         RTE_ETHDEV_LOG(ERR,
1686                                 "Failed to disable allmulticast mode for device (port %u): %s\n",
1687                                 port_id, rte_strerror(-ret));
1688                         return ret;
1689                 }
1690         }
1691
1692         return 0;
1693 }
1694
1695 int
1696 rte_eth_dev_start(uint16_t port_id)
1697 {
1698         struct rte_eth_dev *dev;
1699         struct rte_eth_dev_info dev_info;
1700         int diag;
1701         int ret, ret_stop;
1702
1703         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1704
1705         dev = &rte_eth_devices[port_id];
1706
1707         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1708
1709         if (dev->data->dev_started != 0) {
1710                 RTE_ETHDEV_LOG(INFO,
1711                         "Device with port_id=%"PRIu16" already started\n",
1712                         port_id);
1713                 return 0;
1714         }
1715
1716         ret = rte_eth_dev_info_get(port_id, &dev_info);
1717         if (ret != 0)
1718                 return ret;
1719
1720         /* Restore MAC now if the device does not support live change */
1721         if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
1722                 eth_dev_mac_restore(dev, &dev_info);
1723
1724         diag = (*dev->dev_ops->dev_start)(dev);
1725         if (diag == 0)
1726                 dev->data->dev_started = 1;
1727         else
1728                 return eth_err(port_id, diag);
1729
1730         ret = eth_dev_config_restore(dev, &dev_info, port_id);
1731         if (ret != 0) {
1732                 RTE_ETHDEV_LOG(ERR,
1733                         "Error during restoring configuration for device (port %u): %s\n",
1734                         port_id, rte_strerror(-ret));
1735                 ret_stop = rte_eth_dev_stop(port_id);
1736                 if (ret_stop != 0) {
1737                         RTE_ETHDEV_LOG(ERR,
1738                                 "Failed to stop device (port %u): %s\n",
1739                                 port_id, rte_strerror(-ret_stop));
1740                 }
1741
1742                 return ret;
1743         }
1744
1745         if (dev->data->dev_conf.intr_conf.lsc == 0) {
1746                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
1747                 (*dev->dev_ops->link_update)(dev, 0);
1748         }
1749
1750         rte_ethdev_trace_start(port_id);
1751         return 0;
1752 }
1753
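/*
 * Illustrative sketch (hypothetical names, compiled out): a typical start
 * sequence. rte_eth_dev_start() above replays MAC, promiscuous and
 * allmulticast state via eth_dev_config_restore(), and with
 * intr_conf.lsc unset the PMD refreshes the link status itself.
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static int
example_start_port(uint16_t port_id)
{
        struct rte_eth_link link;
        char text[RTE_ETH_LINK_MAX_STR_LEN];
        int ret;

        ret = rte_eth_dev_start(port_id);
        if (ret != 0)
                return ret;

        /* rte_eth_link_get() may block while the PMD waits for link-up. */
        ret = rte_eth_link_get(port_id, &link);
        if (ret != 0)
                return ret;

        rte_eth_link_to_str(text, sizeof(text), &link);
        RTE_ETHDEV_LOG(INFO, "Port %u started: %s\n", port_id, text);
        return 0;
}
#endif /* RTE_ETHDEV_USAGE_EXAMPLES */
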
1754 int
1755 rte_eth_dev_stop(uint16_t port_id)
1756 {
1757         struct rte_eth_dev *dev;
1758         int ret;
1759
1760         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1761         dev = &rte_eth_devices[port_id];
1762
1763         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_stop, -ENOTSUP);
1764
1765         if (dev->data->dev_started == 0) {
1766                 RTE_ETHDEV_LOG(INFO,
1767                         "Device with port_id=%"PRIu16" already stopped\n",
1768                         port_id);
1769                 return 0;
1770         }
1771
1772         dev->data->dev_started = 0;
1773         ret = (*dev->dev_ops->dev_stop)(dev);
1774         rte_ethdev_trace_stop(port_id, ret);
1775
1776         return ret;
1777 }
1778
1779 int
1780 rte_eth_dev_set_link_up(uint16_t port_id)
1781 {
1782         struct rte_eth_dev *dev;
1783
1784         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1785
1786         dev = &rte_eth_devices[port_id];
1787
1788         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1789         return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
1790 }
1791
1792 int
1793 rte_eth_dev_set_link_down(uint16_t port_id)
1794 {
1795         struct rte_eth_dev *dev;
1796
1797         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1798
1799         dev = &rte_eth_devices[port_id];
1800
1801         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1802         return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
1803 }
1804
1805 int
1806 rte_eth_dev_close(uint16_t port_id)
1807 {
1808         struct rte_eth_dev *dev;
1809         int firsterr, binerr;
1810         int *lasterr = &firsterr;
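        /*
         * Error-aggregation idiom: while no error has occurred, lasterr
         * points at firsterr; the first failure redirects lasterr to the
         * scratch slot binerr, so later errors cannot overwrite the first
         * one, which is the value returned.
         */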
1811
1812         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1813         dev = &rte_eth_devices[port_id];
1814
1815         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
1816         *lasterr = (*dev->dev_ops->dev_close)(dev);
1817         if (*lasterr != 0)
1818                 lasterr = &binerr;
1819
1820         rte_ethdev_trace_close(port_id);
1821         *lasterr = rte_eth_dev_release_port(dev);
1822
1823         return firsterr;
1824 }
1825
1826 int
1827 rte_eth_dev_reset(uint16_t port_id)
1828 {
1829         struct rte_eth_dev *dev;
1830         int ret;
1831
1832         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1833         dev = &rte_eth_devices[port_id];
1834
1835         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
1836
1837         ret = rte_eth_dev_stop(port_id);
1838         if (ret != 0) {
1839                 RTE_ETHDEV_LOG(ERR,
1840                         "Failed to stop device (port %u) before reset: %s - ignoring\n",
1841                         port_id, rte_strerror(-ret));
1842         }
1843         ret = dev->dev_ops->dev_reset(dev);
1844
1845         return eth_err(port_id, ret);
1846 }
1847
1848 int
1849 rte_eth_dev_is_removed(uint16_t port_id)
1850 {
1851         struct rte_eth_dev *dev;
1852         int ret;
1853
1854         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
1855
1856         dev = &rte_eth_devices[port_id];
1857
1858         if (dev->state == RTE_ETH_DEV_REMOVED)
1859                 return 1;
1860
1861         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);
1862
1863         ret = dev->dev_ops->is_removed(dev);
1864         if (ret != 0)
1865                 /* Device is physically removed. */
1866                 dev->state = RTE_ETH_DEV_REMOVED;
1867
1868         return ret;
1869 }
1870
1871 static int
1872 rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg,
1873                              uint16_t n_seg, uint32_t *mbp_buf_size,
1874                              const struct rte_eth_dev_info *dev_info)
1875 {
1876         const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa;
1877         struct rte_mempool *mp_first;
1878         uint32_t offset_mask;
1879         uint16_t seg_idx;
1880
1881         if (n_seg > seg_capa->max_nseg) {
1882                 RTE_ETHDEV_LOG(ERR,
1883                                "Requested Rx segments %u exceed supported %u\n",
1884                                n_seg, seg_capa->max_nseg);
1885                 return -EINVAL;
1886         }
1887         /*
1888          * Check the sizes and offsets against buffer sizes
1889          * for each segment specified in extended configuration.
1890          */
1891         mp_first = rx_seg[0].mp;
1892         offset_mask = (1u << seg_capa->offset_align_log2) - 1;
1893         for (seg_idx = 0; seg_idx < n_seg; seg_idx++) {
1894                 struct rte_mempool *mpl = rx_seg[seg_idx].mp;
1895                 uint32_t length = rx_seg[seg_idx].length;
1896                 uint32_t offset = rx_seg[seg_idx].offset;
1897
1898                 if (mpl == NULL) {
1899                         RTE_ETHDEV_LOG(ERR, "null mempool pointer\n");
1900                         return -EINVAL;
1901                 }
1902                 if (seg_idx != 0 && mp_first != mpl &&
1903                     seg_capa->multi_pools == 0) {
1904                         RTE_ETHDEV_LOG(ERR, "Receiving to multiple pools is not supported\n");
1905                         return -ENOTSUP;
1906                 }
1907                 if (offset != 0) {
1908                         if (seg_capa->offset_allowed == 0) {
1909                                 RTE_ETHDEV_LOG(ERR, "Rx segmentation with offset is not supported\n");
1910                                 return -ENOTSUP;
1911                         }
1912                         if (offset & offset_mask) {
1913                                 RTE_ETHDEV_LOG(ERR, "Rx segmentation offset %u not aligned to 2^%u\n",
1914                                                offset,
1915                                                seg_capa->offset_align_log2);
1916                                 return -EINVAL;
1917                         }
1918                 }
1919                 if (mpl->private_data_size <
1920                         sizeof(struct rte_pktmbuf_pool_private)) {
1921                         RTE_ETHDEV_LOG(ERR,
1922                                        "%s private_data_size %u < %u\n",
1923                                        mpl->name, mpl->private_data_size,
1924                                        (unsigned int)sizeof
1925                                         (struct rte_pktmbuf_pool_private));
1926                         return -ENOSPC;
1927                 }
1928                 offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM;
1929                 *mbp_buf_size = rte_pktmbuf_data_room_size(mpl);
1930                 length = length != 0 ? length : *mbp_buf_size;
1931                 if (*mbp_buf_size < length + offset) {
1932                         RTE_ETHDEV_LOG(ERR,
1933                                        "%s mbuf_data_room_size %u < %u (segment length=%u + segment offset=%u)\n",
1934                                        mpl->name, *mbp_buf_size,
1935                                        length + offset, length, offset);
1936                         return -EINVAL;
1937                 }
1938         }
1939         return 0;
1940 }
1941
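/*
 * Illustrative sketch (hypothetical names, compiled out): building the
 * multi-segment Rx configuration that rte_eth_rx_queue_check_split()
 * above validates. The header/payload split length is arbitrary here;
 * a zero length falls back to the pool's data room size, as the check
 * shows.
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static int
example_setup_split_rxq(uint16_t port_id, uint16_t queue_id,
                        uint16_t nb_desc, struct rte_mempool *hdr_mp,
                        struct rte_mempool *pay_mp)
{
        union rte_eth_rxseg segs[2];
        struct rte_eth_rxconf rxconf;

        memset(&rxconf, 0, sizeof(rxconf));
        memset(segs, 0, sizeof(segs));

        /* First segment receives up to 128 bytes of headers... */
        segs[0].split.mp = hdr_mp;
        segs[0].split.length = 128;
        /* ...the second takes the rest of the packet. */
        segs[1].split.mp = pay_mp;
        segs[1].split.length = 0;

        rxconf.rx_seg = segs;
        rxconf.rx_nseg = RTE_DIM(segs);
        rxconf.offloads = RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;

        /* A NULL mempool selects the extended configuration path. */
        return rte_eth_rx_queue_setup(port_id, queue_id, nb_desc,
                                      rte_socket_id(), &rxconf, NULL);
}
#endif /* RTE_ETHDEV_USAGE_EXAMPLES */
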
1942 int
1943 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1944                        uint16_t nb_rx_desc, unsigned int socket_id,
1945                        const struct rte_eth_rxconf *rx_conf,
1946                        struct rte_mempool *mp)
1947 {
1948         int ret;
1949         uint32_t mbp_buf_size;
1950         struct rte_eth_dev *dev;
1951         struct rte_eth_dev_info dev_info;
1952         struct rte_eth_rxconf local_conf;
1953         void **rxq;
1954
1955         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1956
1957         dev = &rte_eth_devices[port_id];
1958         if (rx_queue_id >= dev->data->nb_rx_queues) {
1959                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
1960                 return -EINVAL;
1961         }
1962
1963         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
1964
1965         ret = rte_eth_dev_info_get(port_id, &dev_info);
1966         if (ret != 0)
1967                 return ret;
1968
1969         if (mp != NULL) {
1970                 /* Single pool configuration check. */
1971                 if (rx_conf != NULL && rx_conf->rx_nseg != 0) {
1972                         RTE_ETHDEV_LOG(ERR,
1973                                        "Ambiguous segment configuration\n");
1974                         return -EINVAL;
1975                 }
1976                 /*
1977                  * Check the size of the mbuf data buffer, this value
1978                  * must be provided in the private data of the memory pool.
1979                  * First check that the memory pool(s) has a valid private data.
1980                  */
1981                 if (mp->private_data_size <
1982                                 sizeof(struct rte_pktmbuf_pool_private)) {
1983                         RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n",
1984                                 mp->name, mp->private_data_size,
1985                                 (unsigned int)
1986                                 sizeof(struct rte_pktmbuf_pool_private));
1987                         return -ENOSPC;
1988                 }
1989                 mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1990                 if (mbp_buf_size < dev_info.min_rx_bufsize +
1991                                    RTE_PKTMBUF_HEADROOM) {
1992                         RTE_ETHDEV_LOG(ERR,
1993                                        "%s mbuf_data_room_size %u < %u (RTE_PKTMBUF_HEADROOM=%u + min_rx_bufsize(dev)=%u)\n",
1994                                        mp->name, mbp_buf_size,
1995                                        RTE_PKTMBUF_HEADROOM +
1996                                        dev_info.min_rx_bufsize,
1997                                        RTE_PKTMBUF_HEADROOM,
1998                                        dev_info.min_rx_bufsize);
1999                         return -EINVAL;
2000                 }
2001         } else {
2002                 const struct rte_eth_rxseg_split *rx_seg;
2003                 uint16_t n_seg;
2004
2005                 /* Extended multi-segment configuration check. */
2006                 if (rx_conf == NULL || rx_conf->rx_seg == NULL || rx_conf->rx_nseg == 0) {
2007                         RTE_ETHDEV_LOG(ERR,
2008                                        "Memory pool is null and no extended configuration provided\n");
2009                         return -EINVAL;
2010                 }
2011
2012                 rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg;
2013                 n_seg = rx_conf->rx_nseg;
2014
2015                 if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
2016                         ret = rte_eth_rx_queue_check_split(rx_seg, n_seg,
2017                                                            &mbp_buf_size,
2018                                                            &dev_info);
2019                         if (ret != 0)
2020                                 return ret;
2021                 } else {
2022                         RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n");
2023                         return -EINVAL;
2024                 }
2025         }
2026
2027         /* Use default specified by driver, if nb_rx_desc is zero */
2028         if (nb_rx_desc == 0) {
2029                 nb_rx_desc = dev_info.default_rxportconf.ring_size;
2030                 /* If driver default is also zero, fall back on EAL default */
2031                 if (nb_rx_desc == 0)
2032                         nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
2033         }
2034
2035         if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
2036                         nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
2037                         nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
2038
2039                 RTE_ETHDEV_LOG(ERR,
2040                         "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
2041                         nb_rx_desc, dev_info.rx_desc_lim.nb_max,
2042                         dev_info.rx_desc_lim.nb_min,
2043                         dev_info.rx_desc_lim.nb_align);
2044                 return -EINVAL;
2045         }
2046
2047         if (dev->data->dev_started &&
2048                 !(dev_info.dev_capa &
2049                         RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
2050                 return -EBUSY;
2051
2052         if (dev->data->dev_started &&
2053                 (dev->data->rx_queue_state[rx_queue_id] !=
2054                         RTE_ETH_QUEUE_STATE_STOPPED))
2055                 return -EBUSY;
2056
2057         rxq = dev->data->rx_queues;
2058         if (rxq[rx_queue_id]) {
2059                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
2060                                         -ENOTSUP);
2061                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
2062                 rxq[rx_queue_id] = NULL;
2063         }
2064
2065         if (rx_conf == NULL)
2066                 rx_conf = &dev_info.default_rxconf;
2067
2068         local_conf = *rx_conf;
2069
2070         /*
2071          * If an offload has already been enabled in
2072          * rte_eth_dev_configure(), it has been enabled on all queues,
2073          * so there is no need to enable it on this queue again.
2074          * The local_conf.offloads passed to the underlying PMD only
2075          * carries the offloads that are enabled on this queue alone,
2076          * not those enabled on all queues.
2077          */
2078         local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
2079
2080         /*
2081          * Offloads newly added for this queue are those not already
2082          * enabled in rte_eth_dev_configure(), and they must be of the
2083          * per-queue type. A pure per-port offload can't be enabled on
2084          * one queue while disabled on another, so such an offload
2085          * can't be newly added for a single queue if it wasn't
2086          * enabled in rte_eth_dev_configure().
2087          */
2088         if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
2089              local_conf.offloads) {
2090                 RTE_ETHDEV_LOG(ERR,
2091                         "Ethdev port_id=%d rx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
2092                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2093                         port_id, rx_queue_id, local_conf.offloads,
2094                         dev_info.rx_queue_offload_capa,
2095                         __func__);
2096                 return -EINVAL;
2097         }
2098
2099         /*
2100          * If LRO is enabled, check that the maximum aggregated packet
2101          * size is supported by the configured device.
2102          */
2103         if (local_conf.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
2104                 if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0)
2105                         dev->data->dev_conf.rxmode.max_lro_pkt_size =
2106                                 dev->data->dev_conf.rxmode.max_rx_pkt_len;
2107                 ret = eth_dev_check_lro_pkt_size(port_id,
2108                                 dev->data->dev_conf.rxmode.max_lro_pkt_size,
2109                                 dev->data->dev_conf.rxmode.max_rx_pkt_len,
2110                                 dev_info.max_lro_pkt_size);
2111                 if (ret != 0)
2112                         return ret;
2113         }
2114
2115         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
2116                                               socket_id, &local_conf, mp);
2117         if (!ret) {
2118                 if (!dev->data->min_rx_buf_size ||
2119                     dev->data->min_rx_buf_size > mbp_buf_size)
2120                         dev->data->min_rx_buf_size = mbp_buf_size;
2121         }
2122
2123         rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp,
2124                 rx_conf, ret);
2125         return eth_err(port_id, ret);
2126 }
2127
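/*
 * Illustrative sketch (hypothetical names, compiled out): the conventional
 * single-pool Rx queue setup. The pool's data room must cover
 * RTE_PKTMBUF_HEADROOM plus the device's min_rx_bufsize, as checked
 * above; the pool size and cache values here are arbitrary.
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static int
example_setup_rxq(uint16_t port_id, uint16_t queue_id, uint16_t nb_desc)
{
        struct rte_mempool *mp;

        mp = rte_pktmbuf_pool_create("example_rx_pool", 8192, 256, 0,
                                     RTE_MBUF_DEFAULT_BUF_SIZE,
                                     rte_socket_id());
        if (mp == NULL)
                return -rte_errno;

        /* A NULL rx_conf selects dev_info.default_rxconf, as handled above. */
        return rte_eth_rx_queue_setup(port_id, queue_id, nb_desc,
                                      rte_socket_id(), NULL, mp);
}
#endif /* RTE_ETHDEV_USAGE_EXAMPLES */
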
2128 int
2129 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2130                                uint16_t nb_rx_desc,
2131                                const struct rte_eth_hairpin_conf *conf)
2132 {
2133         int ret;
2134         struct rte_eth_dev *dev;
2135         struct rte_eth_hairpin_cap cap;
2136         void **rxq;
2137         int i;
2138         int count;
2139
2140         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2141
2142         dev = &rte_eth_devices[port_id];
2143         if (rx_queue_id >= dev->data->nb_rx_queues) {
2144                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
2145                 return -EINVAL;
2146         }
2147         ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2148         if (ret != 0)
2149                 return ret;
2150         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_hairpin_queue_setup,
2151                                 -ENOTSUP);
2152         /* if nb_rx_desc is zero use max number of desc from the driver. */
2153         if (nb_rx_desc == 0)
2154                 nb_rx_desc = cap.max_nb_desc;
2155         if (nb_rx_desc > cap.max_nb_desc) {
2156                 RTE_ETHDEV_LOG(ERR,
2157                         "Invalid value for nb_rx_desc(=%hu), should be: <= %hu\n",
2158                         nb_rx_desc, cap.max_nb_desc);
2159                 return -EINVAL;
2160         }
2161         if (conf->peer_count > cap.max_rx_2_tx) {
2162                 RTE_ETHDEV_LOG(ERR,
2163                         "Invalid value for number of peers for Rx queue(=%u), should be: <= %hu\n",
2164                         conf->peer_count, cap.max_rx_2_tx);
2165                 return -EINVAL;
2166         }
2167         if (conf->peer_count == 0) {
2168                 RTE_ETHDEV_LOG(ERR,
2169                         "Invalid value for number of peers for Rx queue(=%u), should be: > 0\n",
2170                         conf->peer_count);
2171                 return -EINVAL;
2172         }
2173         for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
2174              cap.max_nb_queues != UINT16_MAX; i++) {
2175                 if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
2176                         count++;
2177         }
2178         if (count > cap.max_nb_queues) {
2179                 RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues, max is %d\n",
2180                                cap.max_nb_queues);
2181                 return -EINVAL;
2182         }
2183         if (dev->data->dev_started)
2184                 return -EBUSY;
2185         rxq = dev->data->rx_queues;
2186         if (rxq[rx_queue_id] != NULL) {
2187                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
2188                                         -ENOTSUP);
2189                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
2190                 rxq[rx_queue_id] = NULL;
2191         }
2192         ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
2193                                                       nb_rx_desc, conf);
2194         if (ret == 0)
2195                 dev->data->rx_queue_state[rx_queue_id] =
2196                         RTE_ETH_QUEUE_STATE_HAIRPIN;
2197         return eth_err(port_id, ret);
2198 }
2199
2200 int
2201 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2202                        uint16_t nb_tx_desc, unsigned int socket_id,
2203                        const struct rte_eth_txconf *tx_conf)
2204 {
2205         struct rte_eth_dev *dev;
2206         struct rte_eth_dev_info dev_info;
2207         struct rte_eth_txconf local_conf;
2208         void **txq;
2209         int ret;
2210
2211         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2212
2213         dev = &rte_eth_devices[port_id];
2214         if (tx_queue_id >= dev->data->nb_tx_queues) {
2215                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
2216                 return -EINVAL;
2217         }
2218
2219         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
2220
2221         ret = rte_eth_dev_info_get(port_id, &dev_info);
2222         if (ret != 0)
2223                 return ret;
2224
2225         /* Use default specified by driver, if nb_tx_desc is zero */
2226         if (nb_tx_desc == 0) {
2227                 nb_tx_desc = dev_info.default_txportconf.ring_size;
2228                 /* If driver default is zero, fall back on EAL default */
2229                 if (nb_tx_desc == 0)
2230                         nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
2231         }
2232         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
2233             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
2234             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
2235                 RTE_ETHDEV_LOG(ERR,
2236                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
2237                         nb_tx_desc, dev_info.tx_desc_lim.nb_max,
2238                         dev_info.tx_desc_lim.nb_min,
2239                         dev_info.tx_desc_lim.nb_align);
2240                 return -EINVAL;
2241         }
2242
2243         if (dev->data->dev_started &&
2244                 !(dev_info.dev_capa &
2245                         RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
2246                 return -EBUSY;
2247
2248         if (dev->data->dev_started &&
2249                 (dev->data->tx_queue_state[tx_queue_id] !=
2250                         RTE_ETH_QUEUE_STATE_STOPPED))
2251                 return -EBUSY;
2252
2253         txq = dev->data->tx_queues;
2254         if (txq[tx_queue_id]) {
2255                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
2256                                         -ENOTSUP);
2257                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
2258                 txq[tx_queue_id] = NULL;
2259         }
2260
2261         if (tx_conf == NULL)
2262                 tx_conf = &dev_info.default_txconf;
2263
2264         local_conf = *tx_conf;
2265
2266         /*
2267          * If an offload has already been enabled in
2268          * rte_eth_dev_configure(), it has been enabled on all queues,
2269          * so there is no need to enable it on this queue again.
2270          * The local_conf.offloads passed to the underlying PMD only
2271          * carries the offloads that are enabled on this queue alone,
2272          * not those enabled on all queues.
2273          */
2274         local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
2275
2276         /*
2277          * Offloads newly added for this queue are those not already
2278          * enabled in rte_eth_dev_configure(), and they must be of the
2279          * per-queue type. A pure per-port offload can't be enabled on
2280          * one queue while disabled on another, so such an offload
2281          * can't be newly added for a single queue if it wasn't
2282          * enabled in rte_eth_dev_configure().
2283          */
2284         if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
2285              local_conf.offloads) {
2286                 RTE_ETHDEV_LOG(ERR,
2287                         "Ethdev port_id=%d tx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
2288                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2289                         port_id, tx_queue_id, local_conf.offloads,
2290                         dev_info.tx_queue_offload_capa,
2291                         __func__);
2292                 return -EINVAL;
2293         }
2294
2295         rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf);
2296         return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
2297                        tx_queue_id, nb_tx_desc, socket_id, &local_conf));
2298 }
2299
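/*
 * Illustrative sketch (hypothetical names, compiled out): enabling an
 * offload on a single Tx queue. Only offloads present in
 * tx_queue_offload_capa may be newly added per queue, which is exactly
 * what the check above enforces; port-wide offloads belong in
 * rte_eth_dev_configure().
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static int
example_setup_txq_fast_free(uint16_t port_id, uint16_t queue_id,
                            uint16_t nb_desc, unsigned int socket_id)
{
        struct rte_eth_dev_info info;
        struct rte_eth_txconf txconf;
        int ret;

        ret = rte_eth_dev_info_get(port_id, &info);
        if (ret != 0)
                return ret;

        txconf = info.default_txconf;
        if (info.tx_queue_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
                txconf.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;

        return rte_eth_tx_queue_setup(port_id, queue_id, nb_desc,
                                      socket_id, &txconf);
}
#endif /* RTE_ETHDEV_USAGE_EXAMPLES */
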
2300 int
2301 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2302                                uint16_t nb_tx_desc,
2303                                const struct rte_eth_hairpin_conf *conf)
2304 {
2305         struct rte_eth_dev *dev;
2306         struct rte_eth_hairpin_cap cap;
2307         void **txq;
2308         int i;
2309         int count;
2310         int ret;
2311
2312         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2313         dev = &rte_eth_devices[port_id];
2314         if (tx_queue_id >= dev->data->nb_tx_queues) {
2315                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
2316                 return -EINVAL;
2317         }
2318         ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2319         if (ret != 0)
2320                 return ret;
2321         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup,
2322                                 -ENOTSUP);
2323         /* if nb_tx_desc is zero use max number of desc from the driver. */
2324         if (nb_tx_desc == 0)
2325                 nb_tx_desc = cap.max_nb_desc;
2326         if (nb_tx_desc > cap.max_nb_desc) {
2327                 RTE_ETHDEV_LOG(ERR,
2328                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu\n",
2329                         nb_tx_desc, cap.max_nb_desc);
2330                 return -EINVAL;
2331         }
2332         if (conf->peer_count > cap.max_tx_2_rx) {
2333                 RTE_ETHDEV_LOG(ERR,
2334                         "Invalid value for number of peers for Tx queue(=%u), should be: <= %hu\n",
2335                         conf->peer_count, cap.max_tx_2_rx);
2336                 return -EINVAL;
2337         }
2338         if (conf->peer_count == 0) {
2339                 RTE_ETHDEV_LOG(ERR,
2340                         "Invalid value for number of peers for Tx queue(=%u), should be: > 0\n",
2341                         conf->peer_count);
2342                 return -EINVAL;
2343         }
2344         for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
2345              cap.max_nb_queues != UINT16_MAX; i++) {
2346                 if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
2347                         count++;
2348         }
2349         if (count > cap.max_nb_queues) {
2350                 RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues, max is %d\n",
2351                                cap.max_nb_queues);
2352                 return -EINVAL;
2353         }
2354         if (dev->data->dev_started)
2355                 return -EBUSY;
2356         txq = dev->data->tx_queues;
2357         if (txq[tx_queue_id] != NULL) {
2358                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
2359                                         -ENOTSUP);
2360                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
2361                 txq[tx_queue_id] = NULL;
2362         }
2363         ret = (*dev->dev_ops->tx_hairpin_queue_setup)
2364                 (dev, tx_queue_id, nb_tx_desc, conf);
2365         if (ret == 0)
2366                 dev->data->tx_queue_state[tx_queue_id] =
2367                         RTE_ETH_QUEUE_STATE_HAIRPIN;
2368         return eth_err(port_id, ret);
2369 }
2370
2371 int
2372 rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
2373 {
2374         struct rte_eth_dev *dev;
2375         int ret;
2376
2377         RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2378         dev = &rte_eth_devices[tx_port];
2379         if (dev->data->dev_started == 0) {
2380                 RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port);
2381                 return -EBUSY;
2382         }
2383
2384         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_bind, -ENOTSUP);
2385         ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port);
2386         if (ret != 0)
2387                 RTE_ETHDEV_LOG(ERR, "Failed to bind hairpin Tx %d"
2388                                " to Rx %d (%d - all ports)\n",
2389                                tx_port, rx_port, RTE_MAX_ETHPORTS);
2390
2391         return ret;
2392 }
2393
2394 int
2395 rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
2396 {
2397         struct rte_eth_dev *dev;
2398         int ret;
2399
2400         RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2401         dev = &rte_eth_devices[tx_port];
2402         if (dev->data->dev_started == 0) {
2403                 RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port);
2404                 return -EBUSY;
2405         }
2406
2407         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_unbind, -ENOTSUP);
2408         ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port);
2409         if (ret != 0)
2410                 RTE_ETHDEV_LOG(ERR, "Failed to unbind hairpin Tx %d"
2411                                " from Rx %d (%d - all ports)\n",
2412                                tx_port, rx_port, RTE_MAX_ETHPORTS);
2413
2414         return ret;
2415 }
2416
2417 int
2418 rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
2419                                size_t len, uint32_t direction)
2420 {
2421         struct rte_eth_dev *dev;
2422         int ret;
2423
2424         if (peer_ports == NULL || len == 0)
2425                 return -EINVAL;
2426
2427         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2428         dev = &rte_eth_devices[port_id];
2429         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_get_peer_ports,
2430                                 -ENOTSUP);
2431
2432         ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports,
2433                                                       len, direction);
2434         if (ret < 0)
2435                 RTE_ETHDEV_LOG(ERR, "Failed to get port %d hairpin peer %s ports\n",
2436                                port_id, direction ? "Rx" : "Tx");
2437
2438         return ret;
2439 }
2440
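/*
 * Illustrative sketch (hypothetical names, compiled out): wiring one
 * hairpin queue pair between two ports. Queue ids and descriptor counts
 * are arbitrary; both ports are assumed already configured with the
 * hairpin queues included in their queue counts. The explicit bind step
 * applies when manual_bind is set, and both ports must be started first,
 * as rte_eth_hairpin_bind() above checks.
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static int
example_hairpin_pair(uint16_t rx_port, uint16_t tx_port,
                     uint16_t queue_id, uint16_t nb_desc)
{
        struct rte_eth_hairpin_conf conf;
        int ret;

        memset(&conf, 0, sizeof(conf));
        conf.peer_count = 1;
        conf.manual_bind = 1;
        conf.peers[0].port = tx_port;
        conf.peers[0].queue = queue_id;
        ret = rte_eth_rx_hairpin_queue_setup(rx_port, queue_id,
                                             nb_desc, &conf);
        if (ret != 0)
                return ret;

        conf.peers[0].port = rx_port;
        ret = rte_eth_tx_hairpin_queue_setup(tx_port, queue_id,
                                             nb_desc, &conf);
        if (ret != 0)
                return ret;

        ret = rte_eth_dev_start(rx_port);
        if (ret != 0)
                return ret;
        ret = rte_eth_dev_start(tx_port);
        if (ret != 0)
                return ret;

        return rte_eth_hairpin_bind(tx_port, rx_port);
}
#endif /* RTE_ETHDEV_USAGE_EXAMPLES */
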
2441 void
2442 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
2443                 void *userdata __rte_unused)
2444 {
2445         rte_pktmbuf_free_bulk(pkts, unsent);
2446 }
2447
2448 void
2449 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
2450                 void *userdata)
2451 {
2452         uint64_t *count = userdata;
2453
2454         rte_pktmbuf_free_bulk(pkts, unsent);
2455         *count += unsent;
2456 }
2457
2458 int
2459 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
2460                 buffer_tx_error_fn cbfn, void *userdata)
2461 {
2462         buffer->error_callback = cbfn;
2463         buffer->error_userdata = userdata;
2464         return 0;
2465 }
2466
2467 int
2468 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
2469 {
2470         int ret = 0;
2471
2472         if (buffer == NULL)
2473                 return -EINVAL;
2474
2475         buffer->size = size;
2476         if (buffer->error_callback == NULL) {
2477                 ret = rte_eth_tx_buffer_set_err_callback(
2478                         buffer, rte_eth_tx_buffer_drop_callback, NULL);
2479         }
2480
2481         return ret;
2482 }
2483
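/*
 * Illustrative sketch (hypothetical names, compiled out): typical use of
 * the Tx buffering helpers above. The buffer is sized with
 * RTE_ETH_TX_BUFFER_SIZE(), and the count callback tallies packets the
 * flush could not send instead of dropping them silently.
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static int
example_buffered_tx(uint16_t port_id, uint16_t queue_id,
                    struct rte_mbuf **pkts, uint16_t nb_pkts,
                    uint64_t *drop_count)
{
        struct rte_eth_dev_tx_buffer *buffer;
        uint16_t i;

        buffer = rte_zmalloc_socket("example_tx_buffer",
                        RTE_ETH_TX_BUFFER_SIZE(32), 0, rte_socket_id());
        if (buffer == NULL)
                return -ENOMEM;

        rte_eth_tx_buffer_init(buffer, 32);
        rte_eth_tx_buffer_set_err_callback(buffer,
                        rte_eth_tx_buffer_count_callback, drop_count);

        for (i = 0; i < nb_pkts; i++)
                rte_eth_tx_buffer(port_id, queue_id, buffer, pkts[i]);

        /* Push out anything still queued, then release the buffer. */
        rte_eth_tx_buffer_flush(port_id, queue_id, buffer);
        rte_free(buffer);
        return 0;
}
#endif /* RTE_ETHDEV_USAGE_EXAMPLES */
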
2484 int
2485 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
2486 {
2487         struct rte_eth_dev *dev;
2488         int ret;
2489
2490         /* Validate input data. Bail if not valid or not supported. */
2491         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2492         dev = &rte_eth_devices[port_id];
2493         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
2494         /* Call driver to free pending mbufs. */
2495         ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
2496                                                free_cnt);
2497         return eth_err(port_id, ret);
2498 }
2499
2500 int
2501 rte_eth_promiscuous_enable(uint16_t port_id)
2502 {
2503         struct rte_eth_dev *dev;
2504         int diag = 0;
2505
2506         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2507         dev = &rte_eth_devices[port_id];
2508
2509         if (dev->data->promiscuous == 1)
2510                 return 0;
2511
2512         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP);
2513
2514         diag = (*dev->dev_ops->promiscuous_enable)(dev);
2515         dev->data->promiscuous = (diag == 0) ? 1 : 0;
2516
2517         return eth_err(port_id, diag);
2518 }
2519
2520 int
2521 rte_eth_promiscuous_disable(uint16_t port_id)
2522 {
2523         struct rte_eth_dev *dev;
2524         int diag = 0;
2525
2526         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2527         dev = &rte_eth_devices[port_id];
2528
2529         if (dev->data->promiscuous == 0)
2530                 return 0;
2531
2532         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP);
2533
2534         dev->data->promiscuous = 0;
2535         diag = (*dev->dev_ops->promiscuous_disable)(dev);
2536         if (diag != 0)
2537                 dev->data->promiscuous = 1;
2538
2539         return eth_err(port_id, diag);
2540 }
2541
2542 int
2543 rte_eth_promiscuous_get(uint16_t port_id)
2544 {
2545         struct rte_eth_dev *dev;
2546
2547         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2548
2549         dev = &rte_eth_devices[port_id];
2550         return dev->data->promiscuous;
2551 }
2552
2553 int
2554 rte_eth_allmulticast_enable(uint16_t port_id)
2555 {
2556         struct rte_eth_dev *dev;
2557         int diag;
2558
2559         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2560         dev = &rte_eth_devices[port_id];
2561
2562         if (dev->data->all_multicast == 1)
2563                 return 0;
2564
2565         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP);
2566         diag = (*dev->dev_ops->allmulticast_enable)(dev);
2567         dev->data->all_multicast = (diag == 0) ? 1 : 0;
2568
2569         return eth_err(port_id, diag);
2570 }
2571
2572 int
2573 rte_eth_allmulticast_disable(uint16_t port_id)
2574 {
2575         struct rte_eth_dev *dev;
2576         int diag;
2577
2578         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2579         dev = &rte_eth_devices[port_id];
2580
2581         if (dev->data->all_multicast == 0)
2582                 return 0;
2583
2584         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP);
2585         dev->data->all_multicast = 0;
2586         diag = (*dev->dev_ops->allmulticast_disable)(dev);
2587         if (diag != 0)
2588                 dev->data->all_multicast = 1;
2589
2590         return eth_err(port_id, diag);
2591 }
2592
2593 int
2594 rte_eth_allmulticast_get(uint16_t port_id)
2595 {
2596         struct rte_eth_dev *dev;
2597
2598         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2599
2600         dev = &rte_eth_devices[port_id];
2601         return dev->data->all_multicast;
2602 }
2603
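/*
 * Illustrative sketch (hypothetical names, compiled out): enable
 * promiscuous mode and fall back to all-multicast when the PMD does not
 * support it; both setters above are no-ops if the mode is already
 * active.
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static int
example_receive_all(uint16_t port_id)
{
        int ret;

        ret = rte_eth_promiscuous_enable(port_id);
        if (ret == 0)
                return 0;
        if (ret != -ENOTSUP)
                return ret;

        return rte_eth_allmulticast_enable(port_id);
}
#endif /* RTE_ETHDEV_USAGE_EXAMPLES */
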
2604 int
2605 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
2606 {
2607         struct rte_eth_dev *dev;
2608
2609         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2610         dev = &rte_eth_devices[port_id];
2611
2612         if (dev->data->dev_conf.intr_conf.lsc &&
2613             dev->data->dev_started)
2614                 rte_eth_linkstatus_get(dev, eth_link);
2615         else {
2616                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2617                 (*dev->dev_ops->link_update)(dev, 1);
2618                 *eth_link = dev->data->dev_link;
2619         }
2620
2621         return 0;
2622 }
2623
2624 int
2625 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
2626 {
2627         struct rte_eth_dev *dev;
2628
2629         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2630         dev = &rte_eth_devices[port_id];
2631
2632         if (dev->data->dev_conf.intr_conf.lsc &&
2633             dev->data->dev_started)
2634                 rte_eth_linkstatus_get(dev, eth_link);
2635         else {
2636                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2637                 (*dev->dev_ops->link_update)(dev, 0);
2638                 *eth_link = dev->data->dev_link;
2639         }
2640
2641         return 0;
2642 }
2643
2644 const char *
2645 rte_eth_link_speed_to_str(uint32_t link_speed)
2646 {
2647         switch (link_speed) {
2648         case ETH_SPEED_NUM_NONE: return "None";
2649         case ETH_SPEED_NUM_10M:  return "10 Mbps";
2650         case ETH_SPEED_NUM_100M: return "100 Mbps";
2651         case ETH_SPEED_NUM_1G:   return "1 Gbps";
2652         case ETH_SPEED_NUM_2_5G: return "2.5 Gbps";
2653         case ETH_SPEED_NUM_5G:   return "5 Gbps";
2654         case ETH_SPEED_NUM_10G:  return "10 Gbps";
2655         case ETH_SPEED_NUM_20G:  return "20 Gbps";
2656         case ETH_SPEED_NUM_25G:  return "25 Gbps";
2657         case ETH_SPEED_NUM_40G:  return "40 Gbps";
2658         case ETH_SPEED_NUM_50G:  return "50 Gbps";
2659         case ETH_SPEED_NUM_56G:  return "56 Gbps";
2660         case ETH_SPEED_NUM_100G: return "100 Gbps";
2661         case ETH_SPEED_NUM_200G: return "200 Gbps";
2662         case ETH_SPEED_NUM_UNKNOWN: return "Unknown";
2663         default: return "Invalid";
2664         }
2665 }
2666
2667 int
2668 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
2669 {
2670         if (eth_link->link_status == ETH_LINK_DOWN)
2671                 return snprintf(str, len, "Link down");
2672         else
2673                 return snprintf(str, len, "Link up at %s %s %s",
2674                         rte_eth_link_speed_to_str(eth_link->link_speed),
2675                         (eth_link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
2676                         "FDX" : "HDX",
2677                         (eth_link->link_autoneg == ETH_LINK_AUTONEG) ?
2678                         "Autoneg" : "Fixed");
2679 }
2680
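/*
 * Illustrative sketch (hypothetical names, compiled out): a non-blocking
 * link-status poll rendered with the helpers above; a buffer of
 * RTE_ETH_LINK_MAX_STR_LEN bytes is sized for any link string.
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static void
example_log_link(uint16_t port_id)
{
        struct rte_eth_link link;
        char text[RTE_ETH_LINK_MAX_STR_LEN];

        if (rte_eth_link_get_nowait(port_id, &link) != 0)
                return;

        rte_eth_link_to_str(text, sizeof(text), &link);
        RTE_ETHDEV_LOG(INFO, "Port %u: %s\n", port_id, text);
}
#endif /* RTE_ETHDEV_USAGE_EXAMPLES */
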
2681 int
2682 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
2683 {
2684         struct rte_eth_dev *dev;
2685
2686         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2687
2688         dev = &rte_eth_devices[port_id];
2689         memset(stats, 0, sizeof(*stats));
2690
2691         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
2692         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
2693         return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
2694 }
2695
2696 int
2697 rte_eth_stats_reset(uint16_t port_id)
2698 {
2699         struct rte_eth_dev *dev;
2700         int ret;
2701
2702         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2703         dev = &rte_eth_devices[port_id];
2704
2705         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
2706         ret = (*dev->dev_ops->stats_reset)(dev);
2707         if (ret != 0)
2708                 return eth_err(port_id, ret);
2709
2710         dev->data->rx_mbuf_alloc_failed = 0;
2711
2712         return 0;
2713 }
2714
2715 static inline int
2716 eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev)
2717 {
2718         uint16_t nb_rxqs, nb_txqs;
2719         int count;
2720
2721         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2722         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2723
2724         count = RTE_NB_STATS;
2725         if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) {
2726                 count += nb_rxqs * RTE_NB_RXQ_STATS;
2727                 count += nb_txqs * RTE_NB_TXQ_STATS;
2728         }
2729
2730         return count;
2731 }
2732
2733 static int
2734 eth_dev_get_xstats_count(uint16_t port_id)
2735 {
2736         struct rte_eth_dev *dev;
2737         int count;
2738
2739         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2740         dev = &rte_eth_devices[port_id];
2741         if (dev->dev_ops->xstats_get_names_by_id != NULL) {
2742                 count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
2743                                 NULL, 0);
2744                 if (count < 0)
2745                         return eth_err(port_id, count);
2746         }
2747         if (dev->dev_ops->xstats_get_names != NULL) {
2748                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
2749                 if (count < 0)
2750                         return eth_err(port_id, count);
2751         } else
2752                 count = 0;
2753
2755         count += eth_dev_get_xstats_basic_count(dev);
2756
2757         return count;
2758 }
2759
2760 int
2761 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
2762                 uint64_t *id)
2763 {
2764         int cnt_xstats, idx_xstat;
2765
2766         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2767
2768         if (!id) {
2769                 RTE_ETHDEV_LOG(ERR, "Id pointer is NULL\n");
2770                 return -EINVAL;
2771         }
2772
2773         if (!xstat_name) {
2774                 RTE_ETHDEV_LOG(ERR, "xstat_name pointer is NULL\n");
2775                 return -EINVAL;
2776         }
2777
2778         /* Get count */
2779         cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
2780         if (cnt_xstats < 0) {
2781                 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n");
2782                 return -ENODEV;
2783         }
2784
2785         /* Get id-name lookup table */
2786         struct rte_eth_xstat_name xstats_names[cnt_xstats];
2787
2788         if (cnt_xstats != rte_eth_xstats_get_names_by_id(
2789                         port_id, xstats_names, cnt_xstats, NULL)) {
2790                 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n");
2791                 return -1;
2792         }
2793
2794         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
2795                 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
2796                         *id = idx_xstat;
2797                         return 0;
2798                 }
2799         }
2800
2801         return -EINVAL;
2802 }
2803
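/*
 * Illustrative sketch (hypothetical names, compiled out): resolving one
 * extended statistic by name with the lookup above, then fetching its
 * value through rte_eth_xstats_get_by_id(), e.g. for "rx_good_packets".
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static int
example_read_xstat(uint16_t port_id, const char *name, uint64_t *value)
{
        uint64_t id;
        int ret;

        ret = rte_eth_xstats_get_id_by_name(port_id, name, &id);
        if (ret != 0)
                return ret;

        /* Fetch exactly one counter; 1 means one value was filled in. */
        ret = rte_eth_xstats_get_by_id(port_id, &id, value, 1);
        if (ret < 0)
                return ret;
        return ret == 1 ? 0 : -EIO;
}
#endif /* RTE_ETHDEV_USAGE_EXAMPLES */
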
2804 /* retrieve basic stats names */
2805 static int
2806 eth_basic_stats_get_names(struct rte_eth_dev *dev,
2807         struct rte_eth_xstat_name *xstats_names)
2808 {
2809         int cnt_used_entries = 0;
2810         uint32_t idx, id_queue;
2811         uint16_t num_q;
2812
2813         for (idx = 0; idx < RTE_NB_STATS; idx++) {
2814                 strlcpy(xstats_names[cnt_used_entries].name,
2815                         eth_dev_stats_strings[idx].name,
2816                         sizeof(xstats_names[0].name));
2817                 cnt_used_entries++;
2818         }
2819
2820         if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
2821                 return cnt_used_entries;
2822
2823         num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2824         for (id_queue = 0; id_queue < num_q; id_queue++) {
2825                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
2826                         snprintf(xstats_names[cnt_used_entries].name,
2827                                 sizeof(xstats_names[0].name),
2828                                 "rx_q%u_%s",
2829                                 id_queue, eth_dev_rxq_stats_strings[idx].name);
2830                         cnt_used_entries++;
2831                 }
2832
2833         }
2834         num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2835         for (id_queue = 0; id_queue < num_q; id_queue++) {
2836                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
2837                         snprintf(xstats_names[cnt_used_entries].name,
2838                                 sizeof(xstats_names[0].name),
2839                                 "tx_q%u_%s",
2840                                 id_queue, eth_dev_txq_stats_strings[idx].name);
2841                         cnt_used_entries++;
2842                 }
2843         }
2844         return cnt_used_entries;
2845 }
2846
2847 /* retrieve ethdev extended statistics names */
2848 int
2849 rte_eth_xstats_get_names_by_id(uint16_t port_id,
2850         struct rte_eth_xstat_name *xstats_names, unsigned int size,
2851         uint64_t *ids)
2852 {
2853         struct rte_eth_xstat_name *xstats_names_copy;
2854         unsigned int no_basic_stat_requested = 1;
2855         unsigned int no_ext_stat_requested = 1;
2856         unsigned int expected_entries;
2857         unsigned int basic_count;
2858         struct rte_eth_dev *dev;
2859         unsigned int i;
2860         int ret;
2861
2862         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2863         dev = &rte_eth_devices[port_id];
2864
2865         basic_count = eth_dev_get_xstats_basic_count(dev);
2866         ret = eth_dev_get_xstats_count(port_id);
2867         if (ret < 0)
2868                 return ret;
2869         expected_entries = (unsigned int)ret;
2870
2871         /* Return max number of stats if no ids given */
2872         if (!ids) {
2873                 if (!xstats_names)
2874                         return expected_entries;
2875                 else if (size < expected_entries)
2876                         return expected_entries;
2877         }
2878
2879         if (ids && !xstats_names)
2880                 return -EINVAL;
2881
2882         if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
2883                 uint64_t ids_copy[size];
2884
2885                 for (i = 0; i < size; i++) {
2886                         if (ids[i] < basic_count) {
2887                                 no_basic_stat_requested = 0;
2888                                 break;
2889                         }
2890
2891                         /*
2892                          * Convert ids to xstats ids that PMD knows.
2893                          * ids known by user are basic + extended stats.
2894                          */
2895                         ids_copy[i] = ids[i] - basic_count;
2896                 }
2897
2898                 if (no_basic_stat_requested)
2899                         return (*dev->dev_ops->xstats_get_names_by_id)(dev,
2900                                         xstats_names, ids_copy, size);
2901         }
2902
2903         /* Retrieve all stats */
2904         if (!ids) {
2905                 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
2906                                 expected_entries);
2907                 if (num_stats < 0 || num_stats > (int)expected_entries)
2908                         return num_stats;
2909                 else
2910                         return expected_entries;
2911         }
2912
2913         xstats_names_copy = calloc(expected_entries,
2914                 sizeof(struct rte_eth_xstat_name));
2915
2916         if (!xstats_names_copy) {
2917                 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n");
2918                 return -ENOMEM;
2919         }
2920
2921         if (ids) {
2922                 for (i = 0; i < size; i++) {
2923                         if (ids[i] >= basic_count) {
2924                                 no_ext_stat_requested = 0;
2925                                 break;
2926                         }
2927                 }
2928         }
2929
2930         /* Fill xstats_names_copy structure */
2931         if (ids && no_ext_stat_requested) {
2932                 eth_basic_stats_get_names(dev, xstats_names_copy);
2933         } else {
2934                 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
2935                         expected_entries);
2936                 if (ret < 0) {
2937                         free(xstats_names_copy);
2938                         return ret;
2939                 }
2940         }
2941
2942         /* Filter stats */
2943         for (i = 0; i < size; i++) {
2944                 if (ids[i] >= expected_entries) {
2945                         RTE_ETHDEV_LOG(ERR, "Id value %" PRIu64 " is invalid\n", ids[i]);
2946                         free(xstats_names_copy);
2947                         return -1;
2948                 }
2949                 xstats_names[i] = xstats_names_copy[ids[i]];
2950         }
2951
2952         free(xstats_names_copy);
2953         return size;
2954 }
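
/*
 * Illustrative usage sketch (not part of the ethdev library): resolve the
 * names of a caller-chosen set of xstats ids via
 * rte_eth_xstats_get_names_by_id(). The helper name and the example ids
 * are hypothetical.
 */
static __rte_unused int
eth_example_print_xstat_names_by_id(uint16_t port_id)
{
        uint64_t ids[] = { 0, 1, 2 };   /* the first three xstats ids */
        struct rte_eth_xstat_name names[RTE_DIM(ids)];
        unsigned int i;
        int ret;

        ret = rte_eth_xstats_get_names_by_id(port_id, names,
                        RTE_DIM(ids), ids);
        if (ret < 0)
                return ret;

        for (i = 0; i < RTE_DIM(ids); i++)
                printf("xstat %" PRIu64 ": %s\n", ids[i], names[i].name);

        return 0;
}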
2955
2956 int
2957 rte_eth_xstats_get_names(uint16_t port_id,
2958         struct rte_eth_xstat_name *xstats_names,
2959         unsigned int size)
2960 {
2961         struct rte_eth_dev *dev;
2962         int cnt_used_entries;
2963         int cnt_expected_entries;
2964         int cnt_driver_entries;
2965
2966         cnt_expected_entries = eth_dev_get_xstats_count(port_id);
2967         if (xstats_names == NULL || cnt_expected_entries < 0 ||
2968                         (int)size < cnt_expected_entries)
2969                 return cnt_expected_entries;
2970
2971         /* port_id checked in eth_dev_get_xstats_count() */
2972         dev = &rte_eth_devices[port_id];
2973
2974         cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names);
2975
2976         if (dev->dev_ops->xstats_get_names != NULL) {
2977                 /* If there are any driver-specific xstats, append them
2978                  * to end of list.
2979                  */
2980                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
2981                         dev,
2982                         xstats_names + cnt_used_entries,
2983                         size - cnt_used_entries);
2984                 if (cnt_driver_entries < 0)
2985                         return eth_err(port_id, cnt_driver_entries);
2986                 cnt_used_entries += cnt_driver_entries;
2987         }
2988
2989         return cnt_used_entries;
2990 }
2991
2993 static int
2994 eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
2995 {
2996         struct rte_eth_dev *dev;
2997         struct rte_eth_stats eth_stats;
2998         unsigned int count = 0, i, q;
2999         uint64_t val, *stats_ptr;
3000         uint16_t nb_rxqs, nb_txqs;
3001         int ret;
3002
3003         ret = rte_eth_stats_get(port_id, &eth_stats);
3004         if (ret < 0)
3005                 return ret;
3006
3007         dev = &rte_eth_devices[port_id];
3008
3009         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3010         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3011
3012         /* global stats */
3013         for (i = 0; i < RTE_NB_STATS; i++) {
3014                 stats_ptr = RTE_PTR_ADD(&eth_stats,
3015                                         eth_dev_stats_strings[i].offset);
3016                 val = *stats_ptr;
3017                 xstats[count++].value = val;
3018         }
3019
3020         if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
3021                 return count;
3022
3023         /* per-rxq stats */
3024         for (q = 0; q < nb_rxqs; q++) {
3025                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
3026                         stats_ptr = RTE_PTR_ADD(&eth_stats,
3027                                         eth_dev_rxq_stats_strings[i].offset +
3028                                         q * sizeof(uint64_t));
3029                         val = *stats_ptr;
3030                         xstats[count++].value = val;
3031                 }
3032         }
3033
3034         /* per-txq stats */
3035         for (q = 0; q < nb_txqs; q++) {
3036                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
3037                         stats_ptr = RTE_PTR_ADD(&eth_stats,
3038                                         eth_dev_txq_stats_strings[i].offset +
3039                                         q * sizeof(uint64_t));
3040                         val = *stats_ptr;
3041                         xstats[count++].value = val;
3042                 }
3043         }
3044         return count;
3045 }
3046
3047 /* retrieve ethdev extended statistics */
3048 int
3049 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
3050                          uint64_t *values, unsigned int size)
3051 {
3052         unsigned int no_basic_stat_requested = 1;
3053         unsigned int no_ext_stat_requested = 1;
3054         unsigned int num_xstats_filled;
3055         unsigned int basic_count;
3056         uint16_t expected_entries;
3057         struct rte_eth_dev *dev;
3058         unsigned int i;
3059         int ret;
3060
3061         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3062         ret = eth_dev_get_xstats_count(port_id);
3063         if (ret < 0)
3064                 return ret;
3065         expected_entries = (uint16_t)ret;
3066         struct rte_eth_xstat xstats[expected_entries];
3067         dev = &rte_eth_devices[port_id];
3068         basic_count = eth_dev_get_xstats_basic_count(dev);
3069
3070         /* Return max number of stats if no ids given */
3071         if (!ids) {
3072                 if (!values)
3073                         return expected_entries;
3074                 else if (size < expected_entries)
3075                         return expected_entries;
3076         }
3077
3078         if (ids && !values)
3079                 return -EINVAL;
3080
3081         if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
3083                 uint64_t ids_copy[size];
3084
3085                 for (i = 0; i < size; i++) {
3086                         if (ids[i] < basic_count) {
3087                                 no_basic_stat_requested = 0;
3088                                 break;
3089                         }
3090
3091                         /*
3092                          * Convert ids to xstats ids that PMD knows.
3093                          * ids known by user are basic + extended stats.
3094                          */
3095                         ids_copy[i] = ids[i] - basic_count;
3096                 }
3097
3098                 if (no_basic_stat_requested)
3099                         return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
3100                                         values, size);
3101         }
3102
3103         if (ids) {
3104                 for (i = 0; i < size; i++) {
3105                         if (ids[i] >= basic_count) {
3106                                 no_ext_stat_requested = 0;
3107                                 break;
3108                         }
3109                 }
3110         }
3111
3112         /* Fill the xstats structure */
3113         if (ids && no_ext_stat_requested)
3114                 ret = eth_basic_stats_get(port_id, xstats);
3115         else
3116                 ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
3117
3118         if (ret < 0)
3119                 return ret;
3120         num_xstats_filled = (unsigned int)ret;
3121
3122         /* Return all stats */
3123         if (!ids) {
3124                 for (i = 0; i < num_xstats_filled; i++)
3125                         values[i] = xstats[i].value;
3126                 return expected_entries;
3127         }
3128
3129         /* Filter stats */
3130         for (i = 0; i < size; i++) {
3131                 if (ids[i] >= expected_entries) {
3132                         RTE_ETHDEV_LOG(ERR, "Id value %" PRIu64 " is invalid\n", ids[i]);
3133                         return -1;
3134                 }
3135                 values[i] = xstats[ids[i]].value;
3136         }
3137         return size;
3138 }
3139
3140 int
3141 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
3142         unsigned int n)
3143 {
3144         struct rte_eth_dev *dev;
3145         unsigned int count = 0, i;
3146         signed int xcount = 0;
3147         uint16_t nb_rxqs, nb_txqs;
3148         int ret;
3149
3150         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3151
3152         dev = &rte_eth_devices[port_id];
3153
3154         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3155         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3156
3157         /* Return generic statistics */
3158         count = RTE_NB_STATS;
3159         if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS)
3160                 count += (nb_rxqs * RTE_NB_RXQ_STATS) + (nb_txqs * RTE_NB_TXQ_STATS);
3161
3162         /* implemented by the driver */
3163         if (dev->dev_ops->xstats_get != NULL) {
3164                 /* Retrieve the driver xstats, appending them at the
3165                  * end of the xstats array.
3166                  */
3167                 xcount = (*dev->dev_ops->xstats_get)(dev,
3168                                      xstats ? xstats + count : NULL,
3169                                      (n > count) ? n - count : 0);
3170
3171                 if (xcount < 0)
3172                         return eth_err(port_id, xcount);
3173         }
3174
3175         if (n < count + xcount || xstats == NULL)
3176                 return count + xcount;
3177
3178         /* now fill the xstats structure */
3179         ret = eth_basic_stats_get(port_id, xstats);
3180         if (ret < 0)
3181                 return ret;
3182         count = ret;
3183
3184         for (i = 0; i < count; i++)
3185                 xstats[i].id = i;
3186         /* add an offset to driver-specific stats */
3187         for ( ; i < count + xcount; i++)
3188                 xstats[i].id += count;
3189
3190         return count + xcount;
3191 }
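
/*
 * Illustrative usage sketch (not part of the ethdev library): the usual
 * two-step idiom for rte_eth_xstats_get_names()/rte_eth_xstats_get().
 * Calling either with a NULL buffer returns the required array size.
 * The helper name is hypothetical.
 */
static __rte_unused int
eth_example_dump_xstats(uint16_t port_id)
{
        struct rte_eth_xstat_name *names = NULL;
        struct rte_eth_xstat *values = NULL;
        int i, cnt, ret = -1;

        cnt = rte_eth_xstats_get_names(port_id, NULL, 0);
        if (cnt <= 0)
                return cnt;

        names = calloc(cnt, sizeof(*names));
        values = calloc(cnt, sizeof(*values));
        if (names == NULL || values == NULL)
                goto out;

        /* A device reconfiguration between the calls can change the count,
         * hence the strict checks.
         */
        if (rte_eth_xstats_get_names(port_id, names, cnt) != cnt ||
                        rte_eth_xstats_get(port_id, values, cnt) != cnt)
                goto out;

        for (i = 0; i < cnt; i++)
                printf("%s: %" PRIu64 "\n",
                        names[values[i].id].name, values[i].value);
        ret = 0;
out:
        free(names);
        free(values);
        return ret;
}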
3192
3193 /* reset ethdev extended statistics */
3194 int
3195 rte_eth_xstats_reset(uint16_t port_id)
3196 {
3197         struct rte_eth_dev *dev;
3198
3199         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3200         dev = &rte_eth_devices[port_id];
3201
3202         /* implemented by the driver */
3203         if (dev->dev_ops->xstats_reset != NULL)
3204                 return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev));
3205
3206         /* fallback to default */
3207         return rte_eth_stats_reset(port_id);
3208 }
3209
3210 static int
3211 eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id,
3212                 uint8_t stat_idx, uint8_t is_rx)
3213 {
3214         struct rte_eth_dev *dev;
3215
3216         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3217
3218         dev = &rte_eth_devices[port_id];
3219
3220         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
3221
3222         if (is_rx && (queue_id >= dev->data->nb_rx_queues))
3223                 return -EINVAL;
3224
3225         if (!is_rx && (queue_id >= dev->data->nb_tx_queues))
3226                 return -EINVAL;
3227
3228         if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
3229                 return -EINVAL;
3230
3231         return (*dev->dev_ops->queue_stats_mapping_set)
3232                         (dev, queue_id, stat_idx, is_rx);
3233 }
3234
3236 int
3237 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
3238                 uint8_t stat_idx)
3239 {
3240         return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id,
3241                                                 tx_queue_id,
3242                                                 stat_idx, STAT_QMAP_TX));
3243 }
3244
3246 int
3247 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
3248                 uint8_t stat_idx)
3249 {
3250         return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id,
3251                                                 rx_queue_id,
3252                                                 stat_idx, STAT_QMAP_RX));
3253 }
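
/*
 * Illustrative usage sketch (not part of the ethdev library): on NICs that
 * require an explicit queue-to-counter binding, map the first queues to the
 * per-queue stat counters of the same index. The helper name and nb_queues
 * parameter are hypothetical.
 */
static __rte_unused int
eth_example_map_queue_stats(uint16_t port_id, uint16_t nb_queues)
{
        uint16_t q;
        int ret;

        if (nb_queues > RTE_ETHDEV_QUEUE_STAT_CNTRS)
                nb_queues = RTE_ETHDEV_QUEUE_STAT_CNTRS;

        for (q = 0; q < nb_queues; q++) {
                /* q is bounded above, so the uint8_t stat_idx cannot wrap */
                ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, q, q);
                if (ret != 0)
                        return ret;
                ret = rte_eth_dev_set_tx_queue_stats_mapping(port_id, q, q);
                if (ret != 0)
                        return ret;
        }
        return 0;
}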
3254
3255 int
3256 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
3257 {
3258         struct rte_eth_dev *dev;
3259
3260         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3261         dev = &rte_eth_devices[port_id];
3262
3263         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
3264         return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
3265                                                         fw_version, fw_size));
3266 }
3267
3268 int
3269 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
3270 {
3271         struct rte_eth_dev *dev;
3272         const struct rte_eth_desc_lim lim = {
3273                 .nb_max = UINT16_MAX,
3274                 .nb_min = 0,
3275                 .nb_align = 1,
3276                 .nb_seg_max = UINT16_MAX,
3277                 .nb_mtu_seg_max = UINT16_MAX,
3278         };
3279         int diag;
3280
3281         /*
3282          * Initialize dev_info before the port_id check, so that a caller
3283          * which ignores the return status never reads uninitialized data.
3284          */
3285         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3286         dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
3287
3288         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3289         dev = &rte_eth_devices[port_id];
3290
3291         dev_info->rx_desc_lim = lim;
3292         dev_info->tx_desc_lim = lim;
3293         dev_info->device = dev->device;
3294         dev_info->min_mtu = RTE_ETHER_MIN_MTU;
3295         dev_info->max_mtu = UINT16_MAX;
3296
3297         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
3298         diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info);
3299         if (diag != 0) {
3300                 /* Cleanup already filled in device information */
3301                 memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3302                 return eth_err(port_id, diag);
3303         }
3304
3305         /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */
3306         dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues,
3307                         RTE_MAX_QUEUES_PER_PORT);
3308         dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues,
3309                         RTE_MAX_QUEUES_PER_PORT);
3310
3311         dev_info->driver_name = dev->device->driver->name;
3312         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
3313         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
3314
3315         dev_info->dev_flags = &dev->data->dev_flags;
3316
3317         return 0;
3318 }
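
/*
 * Illustrative usage sketch (not part of the ethdev library): clamp a
 * requested queue count to the limits reported by rte_eth_dev_info_get()
 * before configuring the port. The helper name is hypothetical.
 */
static __rte_unused int
eth_example_clamp_queue_counts(uint16_t port_id, uint16_t *nb_rxq,
                uint16_t *nb_txq)
{
        struct rte_eth_dev_info dev_info;
        int ret;

        ret = rte_eth_dev_info_get(port_id, &dev_info);
        if (ret != 0)
                return ret;

        if (*nb_rxq > dev_info.max_rx_queues)
                *nb_rxq = dev_info.max_rx_queues;
        if (*nb_txq > dev_info.max_tx_queues)
                *nb_txq = dev_info.max_tx_queues;

        printf("%s: using %u Rx / %u Tx queues\n",
                dev_info.driver_name, *nb_rxq, *nb_txq);
        return 0;
}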
3319
3320 int
3321 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3322                                  uint32_t *ptypes, int num)
3323 {
3324         int i, j;
3325         struct rte_eth_dev *dev;
3326         const uint32_t *all_ptypes;
3327
3328         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3329         dev = &rte_eth_devices[port_id];
3330         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
3331         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3332
3333         if (!all_ptypes)
3334                 return 0;
3335
3336         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
3337                 if (all_ptypes[i] & ptype_mask) {
3338                         if (j < num)
3339                                 ptypes[j] = all_ptypes[i];
3340                         j++;
3341                 }
3342
3343         return j;
3344 }
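
/*
 * Illustrative usage sketch (not part of the ethdev library): the size-query
 * idiom for rte_eth_dev_get_supported_ptypes() -- a call with num == 0 only
 * counts the matching ptypes and never touches the buffer. The helper name
 * is hypothetical.
 */
static __rte_unused int
eth_example_list_l3_ptypes(uint16_t port_id)
{
        uint32_t ptypes[16];
        int i, num;

        num = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_L3_MASK,
                        NULL, 0);
        if (num <= 0)
                return num;

        num = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_L3_MASK,
                        ptypes, RTE_DIM(ptypes));
        for (i = 0; i < num && i < (int)RTE_DIM(ptypes); i++)
                printf("supported L3 ptype: 0x%08x\n", ptypes[i]);

        return num;
}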
3345
3346 int
3347 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3348                                  uint32_t *set_ptypes, unsigned int num)
3349 {
3350         const uint32_t valid_ptype_masks[] = {
3351                 RTE_PTYPE_L2_MASK,
3352                 RTE_PTYPE_L3_MASK,
3353                 RTE_PTYPE_L4_MASK,
3354                 RTE_PTYPE_TUNNEL_MASK,
3355                 RTE_PTYPE_INNER_L2_MASK,
3356                 RTE_PTYPE_INNER_L3_MASK,
3357                 RTE_PTYPE_INNER_L4_MASK,
3358         };
3359         const uint32_t *all_ptypes;
3360         struct rte_eth_dev *dev;
3361         uint32_t unused_mask;
3362         unsigned int i, j;
3363         int ret;
3364
3365         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3366         dev = &rte_eth_devices[port_id];
3367
3368         if (num > 0 && set_ptypes == NULL)
3369                 return -EINVAL;
3370
3371         if (*dev->dev_ops->dev_supported_ptypes_get == NULL ||
3372                         *dev->dev_ops->dev_ptypes_set == NULL) {
3373                 ret = 0;
3374                 goto ptype_unknown;
3375         }
3376
3377         if (ptype_mask == 0) {
3378                 ret = (*dev->dev_ops->dev_ptypes_set)(dev,
3379                                 ptype_mask);
3380                 goto ptype_unknown;
3381         }
3382
3383         unused_mask = ptype_mask;
3384         for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) {
3385                 uint32_t mask = ptype_mask & valid_ptype_masks[i];
3386                 if (mask && mask != valid_ptype_masks[i]) {
3387                         ret = -EINVAL;
3388                         goto ptype_unknown;
3389                 }
3390                 unused_mask &= ~valid_ptype_masks[i];
3391         }
3392
3393         if (unused_mask) {
3394                 ret = -EINVAL;
3395                 goto ptype_unknown;
3396         }
3397
3398         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3399         if (all_ptypes == NULL) {
3400                 ret = 0;
3401                 goto ptype_unknown;
3402         }
3403
3404         /*
3405          * Accommodate as many set_ptypes as possible. If the supplied
3406  * set_ptypes array is insufficient, fill it partially.
3407          */
3408         for (i = 0, j = 0; set_ptypes != NULL &&
3409                                 (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) {
3410                 if (ptype_mask & all_ptypes[i]) {
3411                         if (j + 1 < num) { /* j < num - 1, wrap-safe for num == 0 */
3412                                 set_ptypes[j] = all_ptypes[i];
3413                                 j++;
3414                                 continue;
3415                         }
3416                         break;
3417                 }
3418         }
3419
3420         if (set_ptypes != NULL && j < num)
3421                 set_ptypes[j] = RTE_PTYPE_UNKNOWN;
3422
3423         return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask);
3424
3425 ptype_unknown:
3426         if (num > 0)
3427                 set_ptypes[0] = RTE_PTYPE_UNKNOWN;
3428
3429         return ret;
3430 }
3431
3432 int
3433 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
3434 {
3435         struct rte_eth_dev *dev;
3436
3437         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3438         dev = &rte_eth_devices[port_id];
3439         rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
3440
3441         return 0;
3442 }
3443
3444 int
3445 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
3446 {
3447         struct rte_eth_dev *dev;
3448
3449         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3450
3451         dev = &rte_eth_devices[port_id];
3452         *mtu = dev->data->mtu;
3453         return 0;
3454 }
3455
3456 int
3457 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
3458 {
3459         int ret;
3460         struct rte_eth_dev_info dev_info;
3461         struct rte_eth_dev *dev;
3462
3463         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3464         dev = &rte_eth_devices[port_id];
3465         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
3466
3467         /*
3468          * Check if the device supports dev_infos_get, if it does not
3469          * skip min_mtu/max_mtu validation here as this requires values
3470          * that are populated within the call to rte_eth_dev_info_get()
3471          * which relies on dev->dev_ops->dev_infos_get.
3472          */
3473         if (*dev->dev_ops->dev_infos_get != NULL) {
3474                 ret = rte_eth_dev_info_get(port_id, &dev_info);
3475                 if (ret != 0)
3476                         return ret;
3477
3478                 if (mtu < dev_info.min_mtu || mtu > dev_info.max_mtu)
3479                         return -EINVAL;
3480         }
3481
3482         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
3483         if (!ret)
3484                 dev->data->mtu = mtu;
3485
3486         return eth_err(port_id, ret);
3487 }
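
/*
 * Illustrative usage sketch (not part of the ethdev library): clamp a
 * requested MTU to the [min_mtu, max_mtu] range advertised by the driver
 * before calling rte_eth_dev_set_mtu(). The helper name is hypothetical.
 */
static __rte_unused int
eth_example_set_mtu_clamped(uint16_t port_id, uint16_t mtu)
{
        struct rte_eth_dev_info dev_info;
        int ret;

        ret = rte_eth_dev_info_get(port_id, &dev_info);
        if (ret != 0)
                return ret;

        if (mtu < dev_info.min_mtu)
                mtu = dev_info.min_mtu;
        else if (mtu > dev_info.max_mtu)
                mtu = dev_info.max_mtu;

        return rte_eth_dev_set_mtu(port_id, mtu);
}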
3488
3489 int
3490 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
3491 {
3492         struct rte_eth_dev *dev;
3493         int ret;
3494
3495         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3496         dev = &rte_eth_devices[port_id];
3497         if (!(dev->data->dev_conf.rxmode.offloads &
3498               DEV_RX_OFFLOAD_VLAN_FILTER)) {
3499                 RTE_ETHDEV_LOG(ERR, "Port %u: vlan-filtering disabled\n",
3500                         port_id);
3501                 return -ENOSYS;
3502         }
3503
3504         if (vlan_id > 4095) {
3505                 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n",
3506                         port_id, vlan_id);
3507                 return -EINVAL;
3508         }
3509         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
3510
3511         ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
3512         if (ret == 0) {
3513                 struct rte_vlan_filter_conf *vfc;
3514                 int vidx;
3515                 int vbit;
3516
3517                 vfc = &dev->data->vlan_filter_conf;
3518                 vidx = vlan_id / 64;
3519                 vbit = vlan_id % 64;
3520
3521                 if (on)
3522                         vfc->ids[vidx] |= UINT64_C(1) << vbit;
3523                 else
3524                         vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
3525         }
3526
3527         return eth_err(port_id, ret);
3528 }
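
/*
 * Illustrative usage sketch (not part of the ethdev library): accept one
 * VLAN on a port. VLAN filtering must be enabled in the Rx offloads first,
 * otherwise rte_eth_dev_vlan_filter() rejects the request. The helper name
 * is hypothetical.
 */
static __rte_unused int
eth_example_allow_vlan(uint16_t port_id, uint16_t vlan_id)
{
        int mask;
        int ret;

        mask = rte_eth_dev_get_vlan_offload(port_id);
        if (mask < 0)
                return mask;

        /* Turn the filter offload on, keeping the other VLAN offloads. */
        if (!(mask & ETH_VLAN_FILTER_OFFLOAD)) {
                ret = rte_eth_dev_set_vlan_offload(port_id,
                                mask | ETH_VLAN_FILTER_OFFLOAD);
                if (ret != 0)
                        return ret;
        }

        return rte_eth_dev_vlan_filter(port_id, vlan_id, 1);
}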
3529
3530 int
3531 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3532                                     int on)
3533 {
3534         struct rte_eth_dev *dev;
3535
3536         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3537         dev = &rte_eth_devices[port_id];
3538         if (rx_queue_id >= dev->data->nb_rx_queues) {
3539                 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id);
3540                 return -EINVAL;
3541         }
3542
3543         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
3544         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
3545
3546         return 0;
3547 }
3548
3549 int
3550 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
3551                                 enum rte_vlan_type vlan_type,
3552                                 uint16_t tpid)
3553 {
3554         struct rte_eth_dev *dev;
3555
3556         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3557         dev = &rte_eth_devices[port_id];
3558         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
3559
3560         return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
3561                                                                tpid));
3562 }
3563
3564 int
3565 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
3566 {
3567         struct rte_eth_dev_info dev_info;
3568         struct rte_eth_dev *dev;
3569         int ret = 0;
3570         int mask = 0;
3571         int cur, org = 0;
3572         uint64_t orig_offloads;
3573         uint64_t dev_offloads;
3574         uint64_t new_offloads;
3575
3576         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3577         dev = &rte_eth_devices[port_id];
3578
3579         /* save original values in case of failure */
3580         orig_offloads = dev->data->dev_conf.rxmode.offloads;
3581         dev_offloads = orig_offloads;
3582
3583         /* check which option changed by application */
3584         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
3585         org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
3586         if (cur != org) {
3587                 if (cur)
3588                         dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
3589                 else
3590                         dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
3591                 mask |= ETH_VLAN_STRIP_MASK;
3592         }
3593
3594         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
3595         org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
3596         if (cur != org) {
3597                 if (cur)
3598                         dev_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3599                 else
3600                         dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
3601                 mask |= ETH_VLAN_FILTER_MASK;
3602         }
3603
3604         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
3605         org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND);
3606         if (cur != org) {
3607                 if (cur)
3608                         dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
3609                 else
3610                         dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
3611                 mask |= ETH_VLAN_EXTEND_MASK;
3612         }
3613
3614         cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD);
3615         org = !!(dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP);
3616         if (cur != org) {
3617                 if (cur)
3618                         dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
3619                 else
3620                         dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
3621                 mask |= ETH_QINQ_STRIP_MASK;
3622         }
3623
3624         /* no change */
3625         if (mask == 0)
3626                 return ret;
3627
3628         ret = rte_eth_dev_info_get(port_id, &dev_info);
3629         if (ret != 0)
3630                 return ret;
3631
3632         /* Rx VLAN offloading must be within its device capabilities */
3633         if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) {
3634                 new_offloads = dev_offloads & ~orig_offloads;
3635                 RTE_ETHDEV_LOG(ERR,
3636                         "Ethdev port_id=%u newly requested VLAN offloads "
3637                         "0x%" PRIx64 " must be within Rx offload capabilities "
3638                         "0x%" PRIx64 " in %s()\n",
3639                         port_id, new_offloads, dev_info.rx_offload_capa,
3640                         __func__);
3641                 return -EINVAL;
3642         }
3643
3644         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
3645         dev->data->dev_conf.rxmode.offloads = dev_offloads;
3646         ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
3647         if (ret) {
3648                 /* hit an error, restore original values */
3649                 dev->data->dev_conf.rxmode.offloads = orig_offloads;
3650         }
3651
3652         return eth_err(port_id, ret);
3653 }
3654
3655 int
3656 rte_eth_dev_get_vlan_offload(uint16_t port_id)
3657 {
3658         struct rte_eth_dev *dev;
3659         uint64_t *dev_offloads;
3660         int ret = 0;
3661
3662         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3663         dev = &rte_eth_devices[port_id];
3664         dev_offloads = &dev->data->dev_conf.rxmode.offloads;
3665
3666         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
3667                 ret |= ETH_VLAN_STRIP_OFFLOAD;
3668
3669         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
3670                 ret |= ETH_VLAN_FILTER_OFFLOAD;
3671
3672         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
3673                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
3674
3675         if (*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
3676                 ret |= ETH_QINQ_STRIP_OFFLOAD;
3677
3678         return ret;
3679 }
3680
3681 int
3682 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
3683 {
3684         struct rte_eth_dev *dev;
3685
3686         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3687         dev = &rte_eth_devices[port_id];
3688         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
3689
3690         return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
3691 }
3692
3693 int
3694 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3695 {
3696         struct rte_eth_dev *dev;
3697
3698         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3699         dev = &rte_eth_devices[port_id];
3700         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
3701         memset(fc_conf, 0, sizeof(*fc_conf));
3702         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
3703 }
3704
3705 int
3706 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3707 {
3708         struct rte_eth_dev *dev;
3709
3710         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3711         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
3712                 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n");
3713                 return -EINVAL;
3714         }
3715
3716         dev = &rte_eth_devices[port_id];
3717         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
3718         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
3719 }
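
/*
 * Illustrative usage sketch (not part of the ethdev library): read-modify-
 * write of the flow control configuration. Assumes the mode field and the
 * RTE_FC_FULL mode of struct rte_eth_fc_conf; the helper name is
 * hypothetical.
 */
static __rte_unused int
eth_example_enable_flow_ctrl(uint16_t port_id)
{
        struct rte_eth_fc_conf fc_conf;
        int ret;

        ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
        if (ret != 0)
                return ret;

        /* Enable both Rx and Tx pause frames, keep the other settings. */
        fc_conf.mode = RTE_FC_FULL;
        return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}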
3720
3721 int
3722 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
3723                                    struct rte_eth_pfc_conf *pfc_conf)
3724 {
3725         struct rte_eth_dev *dev;
3726
3727         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3728         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
3729                 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
3730                 return -EINVAL;
3731         }
3732
3733         dev = &rte_eth_devices[port_id];
3734         /* High water / low water validation is device specific */
3735         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->priority_flow_ctrl_set, -ENOTSUP);
3736         return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
3737                                 (dev, pfc_conf));
3739 }
3740
3741 static int
3742 eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
3743                         uint16_t reta_size)
3744 {
3745         uint16_t i, num;
3746
3747         if (!reta_conf)
3748                 return -EINVAL;
3749
3750         num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
3751         for (i = 0; i < num; i++) {
3752                 if (reta_conf[i].mask)
3753                         return 0;
3754         }
3755
3756         return -EINVAL;
3757 }
3758
3759 static int
3760 eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
3761                          uint16_t reta_size,
3762                          uint16_t max_rxq)
3763 {
3764         uint16_t i, idx, shift;
3765
3766         if (!reta_conf)
3767                 return -EINVAL;
3768
3769         if (max_rxq == 0) {
3770                 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n");
3771                 return -EINVAL;
3772         }
3773
3774         for (i = 0; i < reta_size; i++) {
3775                 idx = i / RTE_RETA_GROUP_SIZE;
3776                 shift = i % RTE_RETA_GROUP_SIZE;
3777                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
3778                         (reta_conf[idx].reta[shift] >= max_rxq)) {
3779                         RTE_ETHDEV_LOG(ERR,
3780                                 "reta_conf[%u]->reta[%u]: %u exceeds the number of Rx queues: %u\n",
3781                                 idx, shift,
3782                                 reta_conf[idx].reta[shift], max_rxq);
3783                         return -EINVAL;
3784                 }
3785         }
3786
3787         return 0;
3788 }
3789
3790 int
3791 rte_eth_dev_rss_reta_update(uint16_t port_id,
3792                             struct rte_eth_rss_reta_entry64 *reta_conf,
3793                             uint16_t reta_size)
3794 {
3795         struct rte_eth_dev *dev;
3796         int ret;
3797
3798         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3799         /* Check mask bits */
3800         ret = eth_check_reta_mask(reta_conf, reta_size);
3801         if (ret < 0)
3802                 return ret;
3803
3804         dev = &rte_eth_devices[port_id];
3805
3806         /* Check entry value */
3807         ret = eth_check_reta_entry(reta_conf, reta_size,
3808                                 dev->data->nb_rx_queues);
3809         if (ret < 0)
3810                 return ret;
3811
3812         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
3813         return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
3814                                                              reta_size));
3815 }
3816
3817 int
3818 rte_eth_dev_rss_reta_query(uint16_t port_id,
3819                            struct rte_eth_rss_reta_entry64 *reta_conf,
3820                            uint16_t reta_size)
3821 {
3822         struct rte_eth_dev *dev;
3823         int ret;
3824
3825         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3826
3827         /* Check mask bits */
3828         ret = eth_check_reta_mask(reta_conf, reta_size);
3829         if (ret < 0)
3830                 return ret;
3831
3832         dev = &rte_eth_devices[port_id];
3833         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
3834         return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
3835                                                             reta_size));
3836 }
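
/*
 * Illustrative usage sketch (not part of the ethdev library): spread the RSS
 * redirection table round-robin over nb_queues Rx queues. Assumes the
 * reta_size field of struct rte_eth_dev_info and a table of at most 512
 * entries; the helper name is hypothetical.
 */
static __rte_unused int
eth_example_reta_round_robin(uint16_t port_id, uint16_t nb_queues)
{
        struct rte_eth_rss_reta_entry64 reta_conf[512 / RTE_RETA_GROUP_SIZE];
        struct rte_eth_dev_info dev_info;
        uint16_t i, idx, shift;
        int ret;

        if (nb_queues == 0)
                return -EINVAL;

        ret = rte_eth_dev_info_get(port_id, &dev_info);
        if (ret != 0)
                return ret;
        if (dev_info.reta_size == 0 || dev_info.reta_size > 512)
                return -ENOTSUP;

        memset(reta_conf, 0, sizeof(reta_conf));
        for (i = 0; i < dev_info.reta_size; i++) {
                idx = i / RTE_RETA_GROUP_SIZE;
                shift = i % RTE_RETA_GROUP_SIZE;
                reta_conf[idx].mask |= UINT64_C(1) << shift;
                reta_conf[idx].reta[shift] = i % nb_queues;
        }

        return rte_eth_dev_rss_reta_update(port_id, reta_conf,
                        dev_info.reta_size);
}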
3837
3838 int
3839 rte_eth_dev_rss_hash_update(uint16_t port_id,
3840                             struct rte_eth_rss_conf *rss_conf)
3841 {
3842         struct rte_eth_dev *dev;
3843         struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
3844         int ret;
3845
3846         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3847
3848         ret = rte_eth_dev_info_get(port_id, &dev_info);
3849         if (ret != 0)
3850                 return ret;
3851
3852         rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf);
3853
3854         dev = &rte_eth_devices[port_id];
3855         if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
3856             dev_info.flow_type_rss_offloads) {
3857                 RTE_ETHDEV_LOG(ERR,
3858                         "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
3859                         port_id, rss_conf->rss_hf,
3860                         dev_info.flow_type_rss_offloads);
3861                 return -EINVAL;
3862         }
3863         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
3864         return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
3865                                                                  rss_conf));
3866 }
3867
3868 int
3869 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
3870                               struct rte_eth_rss_conf *rss_conf)
3871 {
3872         struct rte_eth_dev *dev;
3873
3874         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3875         dev = &rte_eth_devices[port_id];
3876         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
3877         return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
3878                                                                    rss_conf));
3879 }
3880
3881 int
3882 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
3883                                 struct rte_eth_udp_tunnel *udp_tunnel)
3884 {
3885         struct rte_eth_dev *dev;
3886
3887         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3888         if (udp_tunnel == NULL) {
3889                 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
3890                 return -EINVAL;
3891         }
3892
3893         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
3894                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
3895                 return -EINVAL;
3896         }
3897
3898         dev = &rte_eth_devices[port_id];
3899         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
3900         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
3901                                                                 udp_tunnel));
3902 }
3903
3904 int
3905 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
3906                                    struct rte_eth_udp_tunnel *udp_tunnel)
3907 {
3908         struct rte_eth_dev *dev;
3909
3910         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3911         dev = &rte_eth_devices[port_id];
3912
3913         if (udp_tunnel == NULL) {
3914                 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
3915                 return -EINVAL;
3916         }
3917
3918         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
3919                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
3920                 return -EINVAL;
3921         }
3922
3923         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
3924         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
3925                                                                 udp_tunnel));
3926 }
3927
3928 int
3929 rte_eth_led_on(uint16_t port_id)
3930 {
3931         struct rte_eth_dev *dev;
3932
3933         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3934         dev = &rte_eth_devices[port_id];
3935         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
3936         return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
3937 }
3938
3939 int
3940 rte_eth_led_off(uint16_t port_id)
3941 {
3942         struct rte_eth_dev *dev;
3943
3944         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3945         dev = &rte_eth_devices[port_id];
3946         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
3947         return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
3948 }
3949
3950 int
3951 rte_eth_fec_get_capability(uint16_t port_id,
3952                            struct rte_eth_fec_capa *speed_fec_capa,
3953                            unsigned int num)
3954 {
3955         struct rte_eth_dev *dev;
3956         int ret;
3957
3958         if (speed_fec_capa == NULL && num > 0)
3959                 return -EINVAL;
3960
3961         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3962         dev = &rte_eth_devices[port_id];
3963         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get_capability, -ENOTSUP);
3964         ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num);
3965
3966         return ret;
3967 }
3968
3969 int
3970 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa)
3971 {
3972         struct rte_eth_dev *dev;
3973
3974         if (fec_capa == NULL)
3975                 return -EINVAL;
3976
3977         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3978         dev = &rte_eth_devices[port_id];
3979         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get, -ENOTSUP);
3980         return eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa));
3981 }
3982
3983 int
3984 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa)
3985 {
3986         struct rte_eth_dev *dev;
3987
3988         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3989         dev = &rte_eth_devices[port_id];
3990         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_set, -ENOTSUP);
3991         return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa));
3992 }
3993
3994 /*
3995  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
3996  * an empty spot.
3997  */
3998 static int
3999 eth_dev_get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
4000 {
4001         struct rte_eth_dev_info dev_info;
4002         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4003         unsigned i;
4004         int ret;
4005
4006         ret = rte_eth_dev_info_get(port_id, &dev_info);
4007         if (ret != 0)
4008                 return -1;
4009
4010         for (i = 0; i < dev_info.max_mac_addrs; i++)
4011                 if (memcmp(addr, &dev->data->mac_addrs[i],
4012                                 RTE_ETHER_ADDR_LEN) == 0)
4013                         return i;
4014
4015         return -1;
4016 }
4017
4018 static const struct rte_ether_addr null_mac_addr;
4019
4020 int
4021 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
4022                         uint32_t pool)
4023 {
4024         struct rte_eth_dev *dev;
4025         int index;
4026         uint64_t pool_mask;
4027         int ret;
4028
4029         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4030         dev = &rte_eth_devices[port_id];
4031         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
4032
4033         if (rte_is_zero_ether_addr(addr)) {
4034                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
4035                         port_id);
4036                 return -EINVAL;
4037         }
4038         if (pool >= ETH_64_POOLS) {
4039                 RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", ETH_64_POOLS - 1);
4040                 return -EINVAL;
4041         }
4042
4043         index = eth_dev_get_mac_addr_index(port_id, addr);
4044         if (index < 0) {
4045                 index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr);
4046                 if (index < 0) {
4047                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
4048                                 port_id);
4049                         return -ENOSPC;
4050                 }
4051         } else {
4052                 pool_mask = dev->data->mac_pool_sel[index];
4053
4054                 /* If both the MAC address and pool are already there, do nothing */
4055                 if (pool_mask & (1ULL << pool))
4056                         return 0;
4057         }
4058
4059         /* Update NIC */
4060         ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
4061
4062         if (ret == 0) {
4063                 /* Update address in NIC data structure */
4064                 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]);
4065
4066                 /* Update pool bitmap in NIC data structure */
4067                 dev->data->mac_pool_sel[index] |= (1ULL << pool);
4068         }
4069
4070         return eth_err(port_id, ret);
4071 }
4072
4073 int
4074 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr)
4075 {
4076         struct rte_eth_dev *dev;
4077         int index;
4078
4079         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4080         dev = &rte_eth_devices[port_id];
4081         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
4082
4083         index = eth_dev_get_mac_addr_index(port_id, addr);
4084         if (index == 0) {
4085                 RTE_ETHDEV_LOG(ERR,
4086                         "Port %u: Cannot remove default MAC address\n",
4087                         port_id);
4088                 return -EADDRINUSE;
4089         } else if (index < 0)
4090                 return 0;  /* Do nothing if address wasn't found */
4091
4092         /* Update NIC */
4093         (*dev->dev_ops->mac_addr_remove)(dev, index);
4094
4095         /* Update address in NIC data structure */
4096         rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
4097
4098         /* reset pool bitmap */
4099         dev->data->mac_pool_sel[index] = 0;
4100
4101         return 0;
4102 }
4103
4104 int
4105 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr)
4106 {
4107         struct rte_eth_dev *dev;
4108         int ret;
4109
4110         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4111
4112         if (!rte_is_valid_assigned_ether_addr(addr))
4113                 return -EINVAL;
4114
4115         dev = &rte_eth_devices[port_id];
4116         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
4117
4118         ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
4119         if (ret < 0)
4120                 return ret;
4121
4122         /* Update default address in NIC data structure */
4123         rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
4124
4125         return 0;
4126 }
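
/*
 * Illustrative usage sketch (not part of the ethdev library): replace the
 * default MAC address and read it back via rte_eth_macaddr_get(). The
 * helper name is hypothetical.
 */
static __rte_unused int
eth_example_replace_default_mac(uint16_t port_id,
                struct rte_ether_addr *new_addr)
{
        struct rte_ether_addr cur;
        int ret;

        ret = rte_eth_dev_default_mac_addr_set(port_id, new_addr);
        if (ret != 0)
                return ret;

        ret = rte_eth_macaddr_get(port_id, &cur);
        if (ret != 0)
                return ret;

        /* The readback must now match the address we just set. */
        return rte_is_same_ether_addr(&cur, new_addr) ? 0 : -EIO;
}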
4127
4129 /*
4130  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
4131  * an empty spot.
4132  */
4133 static int
4134 eth_dev_get_hash_mac_addr_index(uint16_t port_id,
4135                 const struct rte_ether_addr *addr)
4136 {
4137         struct rte_eth_dev_info dev_info;
4138         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4139         unsigned i;
4140         int ret;
4141
4142         ret = rte_eth_dev_info_get(port_id, &dev_info);
4143         if (ret != 0)
4144                 return -1;
4145
4146         if (!dev->data->hash_mac_addrs)
4147                 return -1;
4148
4149         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
4150                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
4151                         RTE_ETHER_ADDR_LEN) == 0)
4152                         return i;
4153
4154         return -1;
4155 }
4156
4157 int
4158 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
4159                                 uint8_t on)
4160 {
4161         int index;
4162         int ret;
4163         struct rte_eth_dev *dev;
4164
4165         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4166
4167         dev = &rte_eth_devices[port_id];
4168         if (rte_is_zero_ether_addr(addr)) {
4169                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
4170                         port_id);
4171                 return -EINVAL;
4172         }
4173
4174         index = eth_dev_get_hash_mac_addr_index(port_id, addr);
4175         /* Check if it's already there, and do nothing */
4176         if ((index >= 0) && on)
4177                 return 0;
4178
4179         if (index < 0) {
4180                 if (!on) {
4181                         RTE_ETHDEV_LOG(ERR,
4182                                 "Port %u: the MAC address was not set in UTA\n",
4183                                 port_id);
4184                         return -EINVAL;
4185                 }
4186
4187                 index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr);
4188                 if (index < 0) {
4189                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
4190                                 port_id);
4191                         return -ENOSPC;
4192                 }
4193         }
4194
4195         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
4196         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
4197         if (ret == 0) {
4198                 /* Update address in NIC data structure */
4199                 if (on)
4200                         rte_ether_addr_copy(addr,
4201                                         &dev->data->hash_mac_addrs[index]);
4202                 else
4203                         rte_ether_addr_copy(&null_mac_addr,
4204                                         &dev->data->hash_mac_addrs[index]);
4205         }
4206
4207         return eth_err(port_id, ret);
4208 }
4209
4210 int
4211 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
4212 {
4213         struct rte_eth_dev *dev;
4214
4215         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4216
4217         dev = &rte_eth_devices[port_id];
4218
4219         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
4220         return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
4221                                                                        on));
4222 }
4223
4224 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
4225                                         uint16_t tx_rate)
4226 {
4227         struct rte_eth_dev *dev;
4228         struct rte_eth_dev_info dev_info;
4229         struct rte_eth_link link;
4230         int ret;
4231
4232         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4233
4234         ret = rte_eth_dev_info_get(port_id, &dev_info);
4235         if (ret != 0)
4236                 return ret;
4237
4238         dev = &rte_eth_devices[port_id];
4239         link = dev->data->dev_link;
4240
4241         if (queue_idx >= dev_info.max_tx_queues) {
4242                 RTE_ETHDEV_LOG(ERR,
4243                         "Set queue rate limit: port %u: invalid queue id=%u\n",
4244                         port_id, queue_idx);
4245                 return -EINVAL;
4246         }
4247
4248         if (tx_rate > link.link_speed) {
4249                 RTE_ETHDEV_LOG(ERR,
4250                         "Set queue rate limit: invalid tx_rate=%u, bigger than link speed=%u\n",
4251                         tx_rate, link.link_speed);
4252                 return -EINVAL;
4253         }
4254
4255         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
4256         return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
4257                                                         queue_idx, tx_rate));
4258 }
4259
4260 int
4261 rte_eth_mirror_rule_set(uint16_t port_id,
4262                         struct rte_eth_mirror_conf *mirror_conf,
4263                         uint8_t rule_id, uint8_t on)
4264 {
4265         struct rte_eth_dev *dev;
4266
4267         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4268         if (mirror_conf->rule_type == 0) {
4269                 RTE_ETHDEV_LOG(ERR, "Mirror rule type cannot be 0\n");
4270                 return -EINVAL;
4271         }
4272
4273         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
4274                 RTE_ETHDEV_LOG(ERR, "Invalid dst pool, pool id must be 0-%d\n",
4275                         ETH_64_POOLS - 1);
4276                 return -EINVAL;
4277         }
4278
4279         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
4280              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
4281             (mirror_conf->pool_mask == 0)) {
4282                 RTE_ETHDEV_LOG(ERR,
4283                         "Invalid mirror pool, pool mask cannot be 0\n");
4284                 return -EINVAL;
4285         }
4286
4287         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
4288             mirror_conf->vlan.vlan_mask == 0) {
4289                 RTE_ETHDEV_LOG(ERR,
4290                         "Invalid vlan mask, vlan mask cannot be 0\n");
4291                 return -EINVAL;
4292         }
4293
4294         dev = &rte_eth_devices[port_id];
4295         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
4296
4297         return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
4298                                                 mirror_conf, rule_id, on));
4299 }
4300
4301 int
4302 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
4303 {
4304         struct rte_eth_dev *dev;
4305
4306         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4307
4308         dev = &rte_eth_devices[port_id];
4309         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
4310
4311         return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev,
4312                                                                    rule_id));
4313 }
4314
4315 RTE_INIT(eth_dev_init_cb_lists)
4316 {
4317         uint16_t i;
4318
4319         for (i = 0; i < RTE_MAX_ETHPORTS; i++)
4320                 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
4321 }
4322
4323 int
4324 rte_eth_dev_callback_register(uint16_t port_id,
4325                         enum rte_eth_event_type event,
4326                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4327 {
4328         struct rte_eth_dev *dev;
4329         struct rte_eth_dev_callback *user_cb;
4330         uint16_t next_port;
4331         uint16_t last_port;
4332
4333         if (!cb_fn)
4334                 return -EINVAL;
4335
4336         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4337                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
4338                 return -EINVAL;
4339         }
4340
4341         if (port_id == RTE_ETH_ALL) {
4342                 next_port = 0;
4343                 last_port = RTE_MAX_ETHPORTS - 1;
4344         } else {
4345                 next_port = last_port = port_id;
4346         }
4347
4348         rte_spinlock_lock(&eth_dev_cb_lock);
4349
4350         do {
4351                 dev = &rte_eth_devices[next_port];
4352
4353                 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
4354                         if (user_cb->cb_fn == cb_fn &&
4355                                 user_cb->cb_arg == cb_arg &&
4356                                 user_cb->event == event) {
4357                                 break;
4358                         }
4359                 }
4360
4361                 /* No existing callback found, create a new one. */
4362                 if (user_cb == NULL) {
4363                         user_cb = rte_zmalloc("INTR_USER_CALLBACK",
4364                                 sizeof(struct rte_eth_dev_callback), 0);
4365                         if (user_cb != NULL) {
4366                                 user_cb->cb_fn = cb_fn;
4367                                 user_cb->cb_arg = cb_arg;
4368                                 user_cb->event = event;
4369                                 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
4370                                                   user_cb, next);
4371                         } else {
4372                                 rte_spinlock_unlock(&eth_dev_cb_lock);
4373                                 rte_eth_dev_callback_unregister(port_id, event,
4374                                                                 cb_fn, cb_arg);
4375                                 return -ENOMEM;
4376                         }
4377
4379         } while (++next_port <= last_port);
4380
4381         rte_spinlock_unlock(&eth_dev_cb_lock);
4382         return 0;
4383 }
4384
4385 int
4386 rte_eth_dev_callback_unregister(uint16_t port_id,
4387                         enum rte_eth_event_type event,
4388                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4389 {
4390         int ret;
4391         struct rte_eth_dev *dev;
4392         struct rte_eth_dev_callback *cb, *next;
4393         uint16_t next_port;
4394         uint16_t last_port;
4395
4396         if (!cb_fn)
4397                 return -EINVAL;
4398
4399         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4400                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
4401                 return -EINVAL;
4402         }
4403
4404         if (port_id == RTE_ETH_ALL) {
4405                 next_port = 0;
4406                 last_port = RTE_MAX_ETHPORTS - 1;
4407         } else {
4408                 next_port = last_port = port_id;
4409         }
4410
4411         rte_spinlock_lock(&eth_dev_cb_lock);
4412
4413         do {
4414                 dev = &rte_eth_devices[next_port];
4415                 ret = 0;
4416                 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
4417                      cb = next) {
4418
4419                         next = TAILQ_NEXT(cb, next);
4420
4421                         if (cb->cb_fn != cb_fn || cb->event != event ||
4422                             (cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
4423                                 continue;
4424
4425                         /*
4426                          * if this callback is not executing right now,
4427                          * then remove it.
4428                          */
4429                         if (cb->active == 0) {
4430                                 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
4431                                 rte_free(cb);
4432                         } else {
4433                                 ret = -EAGAIN;
4434                         }
4435                 }
4436         } while (++next_port <= last_port);
4437
4438         rte_spinlock_unlock(&eth_dev_cb_lock);
4439         return ret;
4440 }
4441
4442 int
4443 rte_eth_dev_callback_process(struct rte_eth_dev *dev,
4444         enum rte_eth_event_type event, void *ret_param)
4445 {
4446         struct rte_eth_dev_callback *cb_lst;
4447         struct rte_eth_dev_callback dev_cb;
4448         int rc = 0;
4449
4450         rte_spinlock_lock(&eth_dev_cb_lock);
4451         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
4452                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
4453                         continue;
4454                 dev_cb = *cb_lst;
4455                 cb_lst->active = 1;
4456                 if (ret_param != NULL)
4457                         dev_cb.ret_param = ret_param;
4458
4459                 rte_spinlock_unlock(&eth_dev_cb_lock);
4460                 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
4461                                 dev_cb.cb_arg, dev_cb.ret_param);
4462                 rte_spinlock_lock(&eth_dev_cb_lock);
4463                 cb_lst->active = 0;
4464         }
4465         rte_spinlock_unlock(&eth_dev_cb_lock);
4466         return rc;
4467 }
4468
4469 void
4470 rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
4471 {
4472         if (dev == NULL)
4473                 return;
4474
4475         rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);
4476
4477         dev->state = RTE_ETH_DEV_ATTACHED;
4478 }
4479
4480 int
4481 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
4482 {
4483         uint32_t vec;
4484         struct rte_eth_dev *dev;
4485         struct rte_intr_handle *intr_handle;
4486         uint16_t qid;
4487         int rc;
4488
4489         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4490
4491         dev = &rte_eth_devices[port_id];
4492
4493         if (!dev->intr_handle) {
4494                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4495                 return -ENOTSUP;
4496         }
4497
4498         intr_handle = dev->intr_handle;
4499         if (!intr_handle->intr_vec) {
4500                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4501                 return -EPERM;
4502         }
4503
4504         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
4505                 vec = intr_handle->intr_vec[qid];
4506                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4507                 if (rc && rc != -EEXIST) {
4508                         RTE_ETHDEV_LOG(ERR,
4509                                 "p %u q %u rx ctl error op %d epfd %d vec %u\n",
4510                                 port_id, qid, op, epfd, vec);
4511                 }
4512         }
4513
4514         return 0;
4515 }
4516
4517 int
4518 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
4519 {
4520         struct rte_intr_handle *intr_handle;
4521         struct rte_eth_dev *dev;
4522         unsigned int efd_idx;
4523         uint32_t vec;
4524         int fd;
4525
4526         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
4527
4528         dev = &rte_eth_devices[port_id];
4529
4530         if (queue_id >= dev->data->nb_rx_queues) {
4531                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4532                 return -1;
4533         }
4534
4535         if (!dev->intr_handle) {
4536                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4537                 return -1;
4538         }
4539
4540         intr_handle = dev->intr_handle;
4541         if (!intr_handle->intr_vec) {
4542                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4543                 return -1;
4544         }
4545
4546         vec = intr_handle->intr_vec[queue_id];
4547         efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
4548                 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
4549         fd = intr_handle->efds[efd_idx];
4550
4551         return fd;
4552 }
4553
4554 static inline int
4555 eth_dev_dma_mzone_name(char *name, size_t len, uint16_t port_id, uint16_t queue_id,
4556                 const char *ring_name)
4557 {
4558         return snprintf(name, len, "eth_p%d_q%d_%s",
4559                         port_id, queue_id, ring_name);
4560 }
4561
4562 const struct rte_memzone *
4563 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
4564                          uint16_t queue_id, size_t size, unsigned align,
4565                          int socket_id)
4566 {
4567         char z_name[RTE_MEMZONE_NAMESIZE];
4568         const struct rte_memzone *mz;
4569         int rc;
4570
4571         rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
4572                         queue_id, ring_name);
4573         if (rc >= RTE_MEMZONE_NAMESIZE) {
4574                 RTE_ETHDEV_LOG(ERR, "ring name too long\n");
4575                 rte_errno = ENAMETOOLONG;
4576                 return NULL;
4577         }
4578
4579         mz = rte_memzone_lookup(z_name);
4580         if (mz) {
4581                 if ((socket_id != SOCKET_ID_ANY && socket_id != mz->socket_id) ||
4582                                 size > mz->len ||
4583                                 ((uintptr_t)mz->addr & (align - 1)) != 0) {
4584                         RTE_ETHDEV_LOG(ERR,
4585                                 "memzone %s does not satisfy the requested attributes\n",
4586                                 mz->name);
4587                         return NULL;
4588                 }
4589
4590                 return mz;
4591         }
4592
4593         return rte_memzone_reserve_aligned(z_name, size, socket_id,
4594                         RTE_MEMZONE_IOVA_CONTIG, align);
4595 }
4596
4597 int
4598 rte_eth_dma_zone_free(const struct rte_eth_dev *dev, const char *ring_name,
4599                 uint16_t queue_id)
4600 {
4601         char z_name[RTE_MEMZONE_NAMESIZE];
4602         const struct rte_memzone *mz;
4603         int rc = 0;
4604
4605         rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
4606                         queue_id, ring_name);
4607         if (rc >= RTE_MEMZONE_NAMESIZE) {
4608                 RTE_ETHDEV_LOG(ERR, "ring name too long\n");
4609                 return -ENAMETOOLONG;
4610         }
4611
4612         mz = rte_memzone_lookup(z_name);
4613         if (mz)
4614                 rc = rte_memzone_free(mz);
4615         else
4616                 rc = -ENOENT;
4617
4618         return rc;
4619 }
4620
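/*
 * Usage sketch (editorial addition, hypothetical ETHDEV_DOC_EXAMPLES
 * guard): a PMD reserving IOVA-contiguous descriptor memory at queue
 * setup time. The "tx_ring" tag and the 16-byte descriptor size are
 * illustrative, not a real hardware layout. A repeated setup of the same
 * queue gets the cached memzone back, provided size, socket and
 * alignment still fit.
 */
#ifdef ETHDEV_DOC_EXAMPLES
static const struct rte_memzone *
example_alloc_tx_ring(struct rte_eth_dev *dev, uint16_t queue_id,
                      uint16_t nb_desc, int socket_id)
{
        return rte_eth_dma_zone_reserve(dev, "tx_ring", queue_id,
                        (size_t)nb_desc * 16, RTE_CACHE_LINE_SIZE, socket_id);
}
#endif /* ETHDEV_DOC_EXAMPLES */
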
4621 int
4622 rte_eth_dev_create(struct rte_device *device, const char *name,
4623         size_t priv_data_size,
4624         ethdev_bus_specific_init ethdev_bus_specific_init,
4625         void *bus_init_params,
4626         ethdev_init_t ethdev_init, void *init_params)
4627 {
4628         struct rte_eth_dev *ethdev;
4629         int retval;
4630
4631         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL);
4632
4633         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
4634                 ethdev = rte_eth_dev_allocate(name);
4635                 if (!ethdev)
4636                         return -ENODEV;
4637
4638                 if (priv_data_size) {
4639                         ethdev->data->dev_private = rte_zmalloc_socket(
4640                                 name, priv_data_size, RTE_CACHE_LINE_SIZE,
4641                                 device->numa_node);
4642
4643                         if (!ethdev->data->dev_private) {
4644                                 RTE_ETHDEV_LOG(ERR,
4645                                         "failed to allocate private data\n");
4646                                 retval = -ENOMEM;
4647                                 goto probe_failed;
4648                         }
4649                 }
4650         } else {
4651                 ethdev = rte_eth_dev_attach_secondary(name);
4652                 if (!ethdev) {
4653                         RTE_ETHDEV_LOG(ERR,
4654                                 "secondary process attach failed, ethdev doesn't exist\n");
4655                         return -ENODEV;
4656                 }
4657         }
4658
4659         ethdev->device = device;
4660
4661         if (ethdev_bus_specific_init) {
4662                 retval = ethdev_bus_specific_init(ethdev, bus_init_params);
4663                 if (retval) {
4664                         RTE_ETHDEV_LOG(ERR,
4665                                 "ethdev bus specific initialisation failed\n");
4666                         goto probe_failed;
4667                 }
4668         }
4669
4670         retval = ethdev_init(ethdev, init_params);
4671         if (retval) {
4672                 RTE_ETHDEV_LOG(ERR, "ethdev initialisation failed\n");
4673                 goto probe_failed;
4674         }
4675
4676         rte_eth_dev_probing_finish(ethdev);
4677
4678         return retval;
4679
4680 probe_failed:
4681         rte_eth_dev_release_port(ethdev);
4682         return retval;
4683 }
4684
4685 int
4686 rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
4687         ethdev_uninit_t ethdev_uninit)
4688 {
4689         int ret;
4690
4691         ethdev = rte_eth_dev_allocated(ethdev->data->name);
4692         if (!ethdev)
4693                 return -ENODEV;
4694
4695         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL);
4696
4697         ret = ethdev_uninit(ethdev);
4698         if (ret)
4699                 return ret;
4700
4701         return rte_eth_dev_release_port(ethdev);
4702 }
4703
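/*
 * Usage sketch (editorial addition, hypothetical ETHDEV_DOC_EXAMPLES
 * guard): the probe side of a driver built on rte_eth_dev_create(). The
 * helper hides the primary/secondary split, allocates the private data
 * and ends with rte_eth_dev_probing_finish(); the driver supplies only
 * its init callback. "example_priv" and both functions are hypothetical.
 */
#ifdef ETHDEV_DOC_EXAMPLES
struct example_priv {
        uint64_t flags;
};

static int
example_ethdev_init(struct rte_eth_dev *ethdev, void *init_params __rte_unused)
{
        struct example_priv *priv = ethdev->data->dev_private;

        priv->flags = 0;
        /* Set ethdev->dev_ops, default MAC address, etc. here. */
        return 0;
}

static int
example_probe(struct rte_device *device, const char *name)
{
        return rte_eth_dev_create(device, name, sizeof(struct example_priv),
                        NULL, NULL, example_ethdev_init, NULL);
}
#endif /* ETHDEV_DOC_EXAMPLES */
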
4704 int
4705 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
4706                           int epfd, int op, void *data)
4707 {
4708         uint32_t vec;
4709         struct rte_eth_dev *dev;
4710         struct rte_intr_handle *intr_handle;
4711         int rc;
4712
4713         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4714
4715         dev = &rte_eth_devices[port_id];
4716         if (queue_id >= dev->data->nb_rx_queues) {
4717                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4718                 return -EINVAL;
4719         }
4720
4721         if (!dev->intr_handle) {
4722                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4723                 return -ENOTSUP;
4724         }
4725
4726         intr_handle = dev->intr_handle;
4727         if (!intr_handle->intr_vec) {
4728                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4729                 return -EPERM;
4730         }
4731
4732         vec = intr_handle->intr_vec[queue_id];
4733         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4734         if (rc && rc != -EEXIST) {
4735                 RTE_ETHDEV_LOG(ERR,
4736                         "p %u q %u rx ctl error op %d epfd %d vec %u\n",
4737                         port_id, queue_id, op, epfd, vec);
4738                 return rc;
4739         }
4740
4741         return 0;
4742 }
4743
4744 int
4745 rte_eth_dev_rx_intr_enable(uint16_t port_id,
4746                            uint16_t queue_id)
4747 {
4748         struct rte_eth_dev *dev;
4749         int ret;
4750
4751         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4752
4753         dev = &rte_eth_devices[port_id];
4754
4755         ret = eth_dev_validate_rx_queue(dev, queue_id);
4756         if (ret != 0)
4757                 return ret;
4758
4759         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
4760         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev,
4761                                                                 queue_id));
4762 }
4763
4764 int
4765 rte_eth_dev_rx_intr_disable(uint16_t port_id,
4766                             uint16_t queue_id)
4767 {
4768         struct rte_eth_dev *dev;
4769         int ret;
4770
4771         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4772
4773         dev = &rte_eth_devices[port_id];
4774
4775         ret = eth_dev_validate_rx_queue(dev, queue_id);
4776         if (ret != 0)
4777                 return ret;
4778
4779         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
4780         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev,
4781                                                                 queue_id));
4782 }
4783
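/*
 * Usage sketch (editorial addition, hypothetical ETHDEV_DOC_EXAMPLES
 * guard): the usual Rx interrupt flow. The queue's event fd is armed on
 * the per-thread epoll instance, the lcore blocks in rte_epoll_wait(),
 * and the interrupt is masked again before returning to busy polling.
 */
#ifdef ETHDEV_DOC_EXAMPLES
static int
example_rx_intr_wait(uint16_t port_id, uint16_t queue_id)
{
        struct rte_epoll_event ev;
        int ret;

        /* Bind the queue's interrupt vector to this thread's epoll fd. */
        ret = rte_eth_dev_rx_intr_ctl_q(port_id, queue_id,
                        RTE_EPOLL_PER_THREAD, RTE_INTR_EVENT_ADD, NULL);
        if (ret != 0)
                return ret;

        ret = rte_eth_dev_rx_intr_enable(port_id, queue_id);
        if (ret != 0)
                return ret;

        /* Sleep until traffic arrives; -1 means no timeout. */
        rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);

        return rte_eth_dev_rx_intr_disable(port_id, queue_id);
}
#endif /* ETHDEV_DOC_EXAMPLES */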
4784
4785 const struct rte_eth_rxtx_callback *
4786 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
4787                 rte_rx_callback_fn fn, void *user_param)
4788 {
4789 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4790         rte_errno = ENOTSUP;
4791         return NULL;
4792 #endif
4793         struct rte_eth_dev *dev;
4794
4795         /* check input parameters */
4796         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4797                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4798                 rte_errno = EINVAL;
4799                 return NULL;
4800         }
4801         dev = &rte_eth_devices[port_id];
4802         if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
4803                 rte_errno = EINVAL;
4804                 return NULL;
4805         }
4806         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4807
4808         if (cb == NULL) {
4809                 rte_errno = ENOMEM;
4810                 return NULL;
4811         }
4812
4813         cb->fn.rx = fn;
4814         cb->param = user_param;
4815
4816         rte_spinlock_lock(&eth_dev_rx_cb_lock);
4817         /* Append the new callback at the tail, preserving FIFO order. */
4818         struct rte_eth_rxtx_callback *tail =
4819                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
4820
4821         if (!tail) {
4822                 /* Stores to cb->fn and cb->param should complete before
4823                  * cb is visible to data plane.
4824                  */
4825                 __atomic_store_n(
4826                         &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
4827                         cb, __ATOMIC_RELEASE);
4828
4829         } else {
4830                 while (tail->next)
4831                         tail = tail->next;
4832                 /* Stores to cb->fn and cb->param should complete before
4833                  * cb is visible to data plane.
4834                  */
4835                 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
4836         }
4837         rte_spinlock_unlock(&eth_dev_rx_cb_lock);
4838
4839         return cb;
4840 }
4841
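/*
 * Usage sketch (editorial addition, hypothetical ETHDEV_DOC_EXAMPLES
 * guard): a post-Rx callback that counts received packets. Note that
 * rte_eth_remove_rx_callback() only unlinks an entry; the caller must be
 * sure no data-plane thread is still executing it before freeing the
 * handle returned here.
 */
#ifdef ETHDEV_DOC_EXAMPLES
static uint16_t
example_count_cb(uint16_t port_id __rte_unused, uint16_t queue __rte_unused,
                 struct rte_mbuf *pkts[] __rte_unused, uint16_t nb_pkts,
                 uint16_t max_pkts __rte_unused, void *user_param)
{
        uint64_t *counter = user_param;

        *counter += nb_pkts;
        return nb_pkts; /* keep the whole burst */
}

static const struct rte_eth_rxtx_callback *
example_attach_counter(uint16_t port_id, uint16_t queue_id, uint64_t *counter)
{
        return rte_eth_add_rx_callback(port_id, queue_id,
                                       example_count_cb, counter);
}
#endif /* ETHDEV_DOC_EXAMPLES */
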
4842 const struct rte_eth_rxtx_callback *
4843 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
4844                 rte_rx_callback_fn fn, void *user_param)
4845 {
4846 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4847         rte_errno = ENOTSUP;
4848         return NULL;
4849 #endif
4850         /* check input parameters */
4851         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4852                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4853                 rte_errno = EINVAL;
4854                 return NULL;
4855         }
4856
4857         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4858
4859         if (cb == NULL) {
4860                 rte_errno = ENOMEM;
4861                 return NULL;
4862         }
4863
4864         cb->fn.rx = fn;
4865         cb->param = user_param;
4866
4867         rte_spinlock_lock(&eth_dev_rx_cb_lock);
4868         /* Insert the new callback at the head of the list. */
4869         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
4870         /* Stores to cb->fn, cb->param and cb->next should complete before
4871          * cb is visible to data plane threads.
4872          */
4873         __atomic_store_n(
4874                 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
4875                 cb, __ATOMIC_RELEASE);
4876         rte_spinlock_unlock(&eth_dev_rx_cb_lock);
4877
4878         return cb;
4879 }
4880
4881 const struct rte_eth_rxtx_callback *
4882 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
4883                 rte_tx_callback_fn fn, void *user_param)
4884 {
4885 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4886         rte_errno = ENOTSUP;
4887         return NULL;
4888 #endif
4889         struct rte_eth_dev *dev;
4890
4891         /* check input parameters */
4892         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4893                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
4894                 rte_errno = EINVAL;
4895                 return NULL;
4896         }
4897
4898         dev = &rte_eth_devices[port_id];
4899         if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
4900                 rte_errno = EINVAL;
4901                 return NULL;
4902         }
4903
4904         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4905
4906         if (cb == NULL) {
4907                 rte_errno = ENOMEM;
4908                 return NULL;
4909         }
4910
4911         cb->fn.tx = fn;
4912         cb->param = user_param;
4913
4914         rte_spinlock_lock(&eth_dev_tx_cb_lock);
4915         /* Append the new callback at the tail, preserving FIFO order. */
4916         struct rte_eth_rxtx_callback *tail =
4917                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
4918
4919         if (!tail) {
4920                 /* Stores to cb->fn and cb->param should complete before
4921                  * cb is visible to data plane.
4922                  */
4923                 __atomic_store_n(
4924                         &rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id],
4925                         cb, __ATOMIC_RELEASE);
4926
4927         } else {
4928                 while (tail->next)
4929                         tail = tail->next;
4930                 /* Stores to cb->fn and cb->param should complete before
4931                  * cb is visible to data plane.
4932                  */
4933                 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
4934         }
4935         rte_spinlock_unlock(&eth_dev_tx_cb_lock);
4936
4937         return cb;
4938 }
4939
4940 int
4941 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
4942                 const struct rte_eth_rxtx_callback *user_cb)
4943 {
4944 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4945         return -ENOTSUP;
4946 #endif
4947         /* Check input parameters. */
4948         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4949         if (user_cb == NULL ||
4950                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
4951                 return -EINVAL;
4952
4953         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4954         struct rte_eth_rxtx_callback *cb;
4955         struct rte_eth_rxtx_callback **prev_cb;
4956         int ret = -EINVAL;
4957
4958         rte_spinlock_lock(&eth_dev_rx_cb_lock);
4959         prev_cb = &dev->post_rx_burst_cbs[queue_id];
4960         for (; *prev_cb != NULL; prev_cb = &cb->next) {
4961                 cb = *prev_cb;
4962                 if (cb == user_cb) {
4963                         /* Remove the user cb from the callback list. */
4964                         __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
4965                         ret = 0;
4966                         break;
4967                 }
4968         }
4969         rte_spinlock_unlock(&eth_dev_rx_cb_lock);
4970
4971         return ret;
4972 }
4973
4974 int
4975 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
4976                 const struct rte_eth_rxtx_callback *user_cb)
4977 {
4978 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4979         return -ENOTSUP;
4980 #endif
4981         /* Check input parameters. */
4982         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4983         if (user_cb == NULL ||
4984                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
4985                 return -EINVAL;
4986
4987         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4988         int ret = -EINVAL;
4989         struct rte_eth_rxtx_callback *cb;
4990         struct rte_eth_rxtx_callback **prev_cb;
4991
4992         rte_spinlock_lock(&eth_dev_tx_cb_lock);
4993         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
4994         for (; *prev_cb != NULL; prev_cb = &cb->next) {
4995                 cb = *prev_cb;
4996                 if (cb == user_cb) {
4997                         /* Remove the user cb from the callback list. */
4998                         __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
4999                         ret = 0;
5000                         break;
5001                 }
5002         }
5003         rte_spinlock_unlock(&eth_dev_tx_cb_lock);
5004
5005         return ret;
5006 }
5007
5008 int
5009 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5010         struct rte_eth_rxq_info *qinfo)
5011 {
5012         struct rte_eth_dev *dev;
5013
5014         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5015
5016         if (qinfo == NULL)
5017                 return -EINVAL;
5018
5019         dev = &rte_eth_devices[port_id];
5020         if (queue_id >= dev->data->nb_rx_queues) {
5021                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
5022                 return -EINVAL;
5023         }
5024
5025         if (dev->data->rx_queues == NULL ||
5026                         dev->data->rx_queues[queue_id] == NULL) {
5027                 RTE_ETHDEV_LOG(ERR,
5028                                "Rx queue %"PRIu16" of device with port_id=%"
5029                                PRIu16" has not been setup\n",
5030                                queue_id, port_id);
5031                 return -EINVAL;
5032         }
5033
5034         if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
5035                 RTE_ETHDEV_LOG(INFO,
5036                         "Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
5037                         queue_id, port_id);
5038                 return -EINVAL;
5039         }
5040
5041         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
5042
5043         memset(qinfo, 0, sizeof(*qinfo));
5044         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
5045         return 0;
5046 }
5047
5048 int
5049 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5050         struct rte_eth_txq_info *qinfo)
5051 {
5052         struct rte_eth_dev *dev;
5053
5054         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5055
5056         if (qinfo == NULL)
5057                 return -EINVAL;
5058
5059         dev = &rte_eth_devices[port_id];
5060         if (queue_id >= dev->data->nb_tx_queues) {
5061                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
5062                 return -EINVAL;
5063         }
5064
5065         if (dev->data->tx_queues == NULL ||
5066                         dev->data->tx_queues[queue_id] == NULL) {
5067                 RTE_ETHDEV_LOG(ERR,
5068                                "Tx queue %"PRIu16" of device with port_id=%"
5069                                PRIu16" has not been setup\n",
5070                                queue_id, port_id);
5071                 return -EINVAL;
5072         }
5073
5074         if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
5075                 RTE_ETHDEV_LOG(INFO,
5076                         "Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
5077                         queue_id, port_id);
5078                 return -EINVAL;
5079         }
5080
5081         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
5082
5083         memset(qinfo, 0, sizeof(*qinfo));
5084         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
5085
5086         return 0;
5087 }
5088
5089 int
5090 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5091                           struct rte_eth_burst_mode *mode)
5092 {
5093         struct rte_eth_dev *dev;
5094
5095         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5096
5097         if (mode == NULL)
5098                 return -EINVAL;
5099
5100         dev = &rte_eth_devices[port_id];
5101
5102         if (queue_id >= dev->data->nb_rx_queues) {
5103                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
5104                 return -EINVAL;
5105         }
5106
5107         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP);
5108         memset(mode, 0, sizeof(*mode));
5109         return eth_err(port_id,
5110                        dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode));
5111 }
5112
5113 int
5114 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5115                           struct rte_eth_burst_mode *mode)
5116 {
5117         struct rte_eth_dev *dev;
5118
5119         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5120
5121         if (mode == NULL)
5122                 return -EINVAL;
5123
5124         dev = &rte_eth_devices[port_id];
5125
5126         if (queue_id >= dev->data->nb_tx_queues) {
5127                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
5128                 return -EINVAL;
5129         }
5130
5131         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP);
5132         memset(mode, 0, sizeof(*mode));
5133         return eth_err(port_id,
5134                        dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode));
5135 }
5136
5137 int
5138 rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id,
5139                 struct rte_power_monitor_cond *pmc)
5140 {
5141         struct rte_eth_dev *dev;
5142
5143         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5144
5145         dev = &rte_eth_devices[port_id];
5146
5147         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_monitor_addr, -ENOTSUP);
5148
5149         if (queue_id >= dev->data->nb_rx_queues) {
5150                 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
5151                 return -EINVAL;
5152         }
5153
5154         if (pmc == NULL) {
5155                 RTE_ETHDEV_LOG(ERR, "Invalid power monitor condition=%p\n",
5156                                 pmc);
5157                 return -EINVAL;
5158         }
5159
5160         return eth_err(port_id,
5161                 dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id],
5162                         pmc));
5163 }
5164
5165 int
5166 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
5167                              struct rte_ether_addr *mc_addr_set,
5168                              uint32_t nb_mc_addr)
5169 {
5170         struct rte_eth_dev *dev;
5171
5172         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5173
5174         dev = &rte_eth_devices[port_id];
5175         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
5176         return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
5177                                                 mc_addr_set, nb_mc_addr));
5178 }
5179
5180 int
5181 rte_eth_timesync_enable(uint16_t port_id)
5182 {
5183         struct rte_eth_dev *dev;
5184
5185         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5186         dev = &rte_eth_devices[port_id];
5187
5188         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
5189         return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
5190 }
5191
5192 int
5193 rte_eth_timesync_disable(uint16_t port_id)
5194 {
5195         struct rte_eth_dev *dev;
5196
5197         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5198         dev = &rte_eth_devices[port_id];
5199
5200         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
5201         return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
5202 }
5203
5204 int
5205 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
5206                                    uint32_t flags)
5207 {
5208         struct rte_eth_dev *dev;
5209
5210         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5211         dev = &rte_eth_devices[port_id];
5212
5213         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
5214         return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
5215                                 (dev, timestamp, flags));
5216 }
5217
5218 int
5219 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
5220                                    struct timespec *timestamp)
5221 {
5222         struct rte_eth_dev *dev;
5223
5224         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5225         dev = &rte_eth_devices[port_id];
5226
5227         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
5228         return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
5229                                 (dev, timestamp));
5230 }
5231
5232 int
5233 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
5234 {
5235         struct rte_eth_dev *dev;
5236
5237         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5238         dev = &rte_eth_devices[port_id];
5239
5240         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
5241         return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev,
5242                                                                       delta));
5243 }
5244
5245 int
5246 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
5247 {
5248         struct rte_eth_dev *dev;
5249
5250         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5251         dev = &rte_eth_devices[port_id];
5252
5253         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
5254         return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
5255                                                                 timestamp));
5256 }
5257
5258 int
5259 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
5260 {
5261         struct rte_eth_dev *dev;
5262
5263         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5264         dev = &rte_eth_devices[port_id];
5265
5266         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
5267         return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
5268                                                                 timestamp));
5269 }
5270
5271 int
5272 rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
5273 {
5274         struct rte_eth_dev *dev;
5275
5276         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5277         dev = &rte_eth_devices[port_id];
5278
5279         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP);
5280         return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock));
5281 }
5282
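/*
 * Usage sketch (editorial addition, hypothetical ETHDEV_DOC_EXAMPLES
 * guard): the minimal IEEE 1588 sequence. Timesync is switched on once;
 * afterwards the device clock can be sampled and nudged with the
 * read/write/adjust calls above.
 */
#ifdef ETHDEV_DOC_EXAMPLES
static int
example_timesync_start(uint16_t port_id, struct timespec *now)
{
        int ret;

        ret = rte_eth_timesync_enable(port_id);
        if (ret != 0)
                return ret;

        return rte_eth_timesync_read_time(port_id, now);
}
#endif /* ETHDEV_DOC_EXAMPLES */
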
5283 int
5284 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
5285 {
5286         struct rte_eth_dev *dev;
5287
5288         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5289         if (info == NULL)
5290                 return -EINVAL;
5291
5292         dev = &rte_eth_devices[port_id];
5293         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
5294         return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
5295 }
5296
5297 int
5298 rte_eth_dev_get_eeprom_length(uint16_t port_id)
5299 {
5300         struct rte_eth_dev *dev;
5301
5302         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5303
5304         dev = &rte_eth_devices[port_id];
5305         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
5306         return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
5307 }
5308
5309 int
5310 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
5311 {
5312         struct rte_eth_dev *dev;
5313
5314         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5315         if (info == NULL)
5316                 return -EINVAL;
5317
5318         dev = &rte_eth_devices[port_id];
5319         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
5320         return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
5321 }
5322
5323 int
5324 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
5325 {
5326         struct rte_eth_dev *dev;
5327
5328         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5329         if (info == NULL)
5330                 return -EINVAL;
5331
5332         dev = &rte_eth_devices[port_id];
5333         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
5334         return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
5335 }
5336
5337 int
5338 rte_eth_dev_get_module_info(uint16_t port_id,
5339                             struct rte_eth_dev_module_info *modinfo)
5340 {
5341         struct rte_eth_dev *dev;
5342
5343         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5344         if (modinfo == NULL)
5345                 return -EINVAL;
5346
5347         dev = &rte_eth_devices[port_id];
5348         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP);
5349         return (*dev->dev_ops->get_module_info)(dev, modinfo);
5350 }
5351
5352 int
5353 rte_eth_dev_get_module_eeprom(uint16_t port_id,
5354                               struct rte_dev_eeprom_info *info)
5355 {
5356         struct rte_eth_dev *dev;
5357
5358         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5359         if (info == NULL || info->data == NULL || info->length == 0)
5360                 return -EINVAL;
5361
5362         dev = &rte_eth_devices[port_id];
5363         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP);
5364         return (*dev->dev_ops->get_module_eeprom)(dev, info);
5365 }
5366
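/*
 * Usage sketch (editorial addition, hypothetical ETHDEV_DOC_EXAMPLES
 * guard): dumping a transceiver module's EEPROM. The module info call
 * reports how many bytes are readable; the caller then supplies a buffer
 * of that size.
 */
#ifdef ETHDEV_DOC_EXAMPLES
static int
example_dump_module_eeprom(uint16_t port_id)
{
        struct rte_eth_dev_module_info modinfo;
        struct rte_dev_eeprom_info eeprom = { 0 };
        int ret;

        ret = rte_eth_dev_get_module_info(port_id, &modinfo);
        if (ret != 0)
                return ret;

        eeprom.length = modinfo.eeprom_len;
        eeprom.data = malloc(eeprom.length);
        if (eeprom.data == NULL)
                return -ENOMEM;

        ret = rte_eth_dev_get_module_eeprom(port_id, &eeprom);
        /* ... inspect eeprom.data on success ... */
        free(eeprom.data);
        return ret;
}
#endif /* ETHDEV_DOC_EXAMPLES */
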
5367 int
5368 rte_eth_dev_get_dcb_info(uint16_t port_id,
5369                              struct rte_eth_dcb_info *dcb_info)
5370 {
5371         struct rte_eth_dev *dev;
5372
5373         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5374
5375         if (dcb_info == NULL)
5376                 return -EINVAL;
5377         dev = &rte_eth_devices[port_id];
5378         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
5379         return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
5380 }
5381
5382 static void
5383 eth_dev_adjust_nb_desc(uint16_t *nb_desc,
5384                 const struct rte_eth_desc_lim *desc_lim)
5385 {
5386         if (desc_lim->nb_align != 0)
5387                 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
5388
5389         if (desc_lim->nb_max != 0)
5390                 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
5391
5392         *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
5393 }
5394
5395 int
5396 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
5397                                  uint16_t *nb_rx_desc,
5398                                  uint16_t *nb_tx_desc)
5399 {
5400         struct rte_eth_dev_info dev_info;
5401         int ret;
5402
5403         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5404
5405         ret = rte_eth_dev_info_get(port_id, &dev_info);
5406         if (ret != 0)
5407                 return ret;
5408
5409         if (nb_rx_desc != NULL)
5410                 eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
5411
5412         if (nb_tx_desc != NULL)
5413                 eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
5414
5415         return 0;
5416 }
5417
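/*
 * Usage sketch (editorial addition, hypothetical ETHDEV_DOC_EXAMPLES
 * guard): clamping requested ring sizes before queue setup. The adjusted
 * values respect the PMD's min/max/alignment limits, so the setup calls
 * below cannot fail on descriptor-count grounds. The 1024/4096 requests
 * are arbitrary.
 */
#ifdef ETHDEV_DOC_EXAMPLES
static int
example_sized_setup(uint16_t port_id, struct rte_mempool *mb_pool)
{
        uint16_t nb_rxd = 1024;
        uint16_t nb_txd = 4096;
        int ret;

        ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
        if (ret != 0)
                return ret;

        ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd,
                        rte_eth_dev_socket_id(port_id), NULL, mb_pool);
        if (ret != 0)
                return ret;

        return rte_eth_tx_queue_setup(port_id, 0, nb_txd,
                        rte_eth_dev_socket_id(port_id), NULL);
}
#endif /* ETHDEV_DOC_EXAMPLES */
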
5418 int
5419 rte_eth_dev_hairpin_capability_get(uint16_t port_id,
5420                                    struct rte_eth_hairpin_cap *cap)
5421 {
5422         struct rte_eth_dev *dev;
5423         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5424         if (cap == NULL)
5425                 return -EINVAL;
5426         dev = &rte_eth_devices[port_id];
5427         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP);
5428         memset(cap, 0, sizeof(*cap));
5429         return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap));
5430 }
5431
5432 int
5433 rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
5434 {
5435         if (dev->data->rx_queue_state[queue_id] ==
5436             RTE_ETH_QUEUE_STATE_HAIRPIN)
5437                 return 1;
5438         return 0;
5439 }
5440
5441 int
5442 rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
5443 {
5444         if (dev->data->tx_queue_state[queue_id] ==
5445             RTE_ETH_QUEUE_STATE_HAIRPIN)
5446                 return 1;
5447         return 0;
5448 }
5449
5450 int
5451 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
5452 {
5453         struct rte_eth_dev *dev;
5454
5455         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5456
5457         if (pool == NULL)
5458                 return -EINVAL;
5459
5460         dev = &rte_eth_devices[port_id];
5461
5462         if (*dev->dev_ops->pool_ops_supported == NULL)
5463                 return 1; /* all pools are supported */
5464
5465         return (*dev->dev_ops->pool_ops_supported)(dev, pool);
5466 }
5467
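/*
 * Usage sketch (editorial addition, hypothetical ETHDEV_DOC_EXAMPLES
 * guard): probing whether a port's PMD can work with a given mempool
 * handler before the pool is created. Per the API contract, 0 marks the
 * PMD's preferred handler, 1 plain support, and a negative value no
 * support.
 */
#ifdef ETHDEV_DOC_EXAMPLES
static bool
example_ring_pool_ok(uint16_t port_id)
{
        return rte_eth_dev_pool_ops_supported(port_id, "ring_mp_mc") >= 0;
}
#endif /* ETHDEV_DOC_EXAMPLES */
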
5468 /**
5469  * A set of values to describe the possible states of a switch domain.
5470  */
5471 enum rte_eth_switch_domain_state {
5472         RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
5473         RTE_ETH_SWITCH_DOMAIN_ALLOCATED
5474 };
5475
5476 /**
5477  * Array of switch domains available for allocation. Array is sized to
5478  * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
5479  * ethdev ports in a single process.
5480  */
5481 static struct rte_eth_dev_switch {
5482         enum rte_eth_switch_domain_state state;
5483 } eth_dev_switch_domains[RTE_MAX_ETHPORTS];
5484
5485 int
5486 rte_eth_switch_domain_alloc(uint16_t *domain_id)
5487 {
5488         uint16_t i;
5489
5490         *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
5491
5492         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
5493                 if (eth_dev_switch_domains[i].state ==
5494                         RTE_ETH_SWITCH_DOMAIN_UNUSED) {
5495                         eth_dev_switch_domains[i].state =
5496                                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
5497                         *domain_id = i;
5498                         return 0;
5499                 }
5500         }
5501
5502         return -ENOSPC;
5503 }
5504
5505 int
5506 rte_eth_switch_domain_free(uint16_t domain_id)
5507 {
5508         if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
5509                 domain_id >= RTE_MAX_ETHPORTS)
5510                 return -EINVAL;
5511
5512         if (eth_dev_switch_domains[domain_id].state !=
5513                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
5514                 return -EINVAL;
5515
5516         eth_dev_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;
5517
5518         return 0;
5519 }
5520
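/*
 * Usage sketch (editorial addition, hypothetical ETHDEV_DOC_EXAMPLES
 * guard): a PF driver typically grabs one switch domain at probe time
 * and stamps the same ID into every representor it spawns, so that all
 * ports of one switch are grouped together.
 */
#ifdef ETHDEV_DOC_EXAMPLES
static int
example_domain_setup(uint16_t *domain_id)
{
        int ret;

        ret = rte_eth_switch_domain_alloc(domain_id);
        if (ret != 0)
                return ret; /* -ENOSPC once all domains are in use */

        /* ... copy *domain_id into each port's switch_info ... */
        return 0;
}
#endif /* ETHDEV_DOC_EXAMPLES */
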
5521 static int
5522 eth_dev_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
5523 {
5524         int state;
5525         struct rte_kvargs_pair *pair;
5526         char *letter;
5527
5528         arglist->str = strdup(str_in);
5529         if (arglist->str == NULL)
5530                 return -ENOMEM;
5531
5532         letter = arglist->str;
5533         state = 0;
5534         arglist->count = 0;
5535         pair = &arglist->pairs[0];
5536         while (1) {
5537                 switch (state) {
5538                 case 0: /* Initial */
5539                         if (*letter == '=')
5540                                 return -EINVAL;
5541                         else if (*letter == '\0')
5542                                 return 0;
5543
5544                         state = 1;
5545                         pair->key = letter;
5546                         /* fall-thru */
5547
5548                 case 1: /* Parsing key */
5549                         if (*letter == '=') {
5550                                 *letter = '\0';
5551                                 pair->value = letter + 1;
5552                                 state = 2;
5553                         } else if (*letter == ',' || *letter == '\0')
5554                                 return -EINVAL;
5555                         break;
5556
5557
5558                 case 2: /* Parsing value */
5559                         if (*letter == '[')
5560                                 state = 3;
5561                         else if (*letter == ',') {
5562                                 *letter = '\0';
5563                                 arglist->count++;
5564                                 pair = &arglist->pairs[arglist->count];
5565                                 state = 0;
5566                         } else if (*letter == '\0') {
5567                                 letter--;
5568                                 arglist->count++;
5569                                 pair = &arglist->pairs[arglist->count];
5570                                 state = 0;
5571                         }
5572                         break;
5573
5574                 case 3: /* Parsing list */
5575                         if (*letter == ']')
5576                                 state = 2;
5577                         else if (*letter == '\0')
5578                                 return -EINVAL;
5579                         break;
5580                 }
5581                 letter++;
5582         }
5583 }
5584
5585 int
5586 rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
5587 {
5588         struct rte_kvargs args;
5589         struct rte_kvargs_pair *pair;
5590         unsigned int i;
5591         int result = 0;
5592
5593         memset(eth_da, 0, sizeof(*eth_da));
5594
5595         result = eth_dev_devargs_tokenise(&args, dargs);
5596         if (result < 0)
5597                 goto parse_cleanup;
5598
5599         for (i = 0; i < args.count; i++) {
5600                 pair = &args.pairs[i];
5601                 if (strcmp("representor", pair->key) == 0) {
5602                         if (eth_da->type != RTE_ETH_REPRESENTOR_NONE) {
5603                                 RTE_ETHDEV_LOG(ERR, "duplicated representor key: %s\n",
5604                                         dargs);
5605                                 result = -1;
5606                                 goto parse_cleanup;
5607                         }
5608                         result = rte_eth_devargs_parse_representor_ports(
5609                                         pair->value, eth_da);
5610                         if (result < 0)
5611                                 goto parse_cleanup;
5612                 }
5613         }
5614
5615 parse_cleanup:
5616         if (args.str)
5617                 free(args.str);
5618
5619         return result;
5620 }
5621
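/*
 * Usage sketch (editorial addition, hypothetical ETHDEV_DOC_EXAMPLES
 * guard): how a PMD consumes the tokeniser above. Parsing the legacy
 * list form "representor=[0-3]" should expand to four representor ports
 * in the rte_eth_devargs structure.
 */
#ifdef ETHDEV_DOC_EXAMPLES
static int
example_parse_representors(void)
{
        struct rte_eth_devargs da;
        int ret;

        ret = rte_eth_devargs_parse("representor=[0-3]", &da);
        if (ret < 0)
                return ret;

        return da.nb_representor_ports; /* expected: 4 */
}
#endif /* ETHDEV_DOC_EXAMPLES */
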
5622 int
5623 rte_eth_representor_id_get(const struct rte_eth_dev *ethdev,
5624                            enum rte_eth_representor_type type,
5625                            int controller, int pf, int representor_port,
5626                            uint16_t *repr_id)
5627 {
5628         int ret, n, i, count;
5629         struct rte_eth_representor_info *info = NULL;
5630         size_t size;
5631
5632         if (type == RTE_ETH_REPRESENTOR_NONE)
5633                 return 0;
5634         if (repr_id == NULL)
5635                 return -EINVAL;
5636
5637         /* Get PMD representor range info. */
5638         ret = rte_eth_representor_info_get(ethdev->data->port_id, NULL);
5639         if (ret == -ENOTSUP && type == RTE_ETH_REPRESENTOR_VF &&
5640             controller == -1 && pf == -1) {
5641                 /* Direct mapping for legacy VF representor. */
5642                 *repr_id = representor_port;
5643                 return 0;
5644         } else if (ret < 0) {
5645                 return ret;
5646         }
5647         n = ret;
5648         size = sizeof(*info) + n * sizeof(info->ranges[0]);
5649         info = calloc(1, size);
5650         if (info == NULL)
5651                 return -ENOMEM;
5652         ret = rte_eth_representor_info_get(ethdev->data->port_id, info);
5653         if (ret < 0)
5654                 goto out;
5655
5656         /* Default controller and pf to caller. */
5657         if (controller == -1)
5658                 controller = info->controller;
5659         if (pf == -1)
5660                 pf = info->pf;
5661
5662         /* Locate representor ID. */
5663         ret = -ENOENT;
5664         for (i = 0; i < n; ++i) {
5665                 if (info->ranges[i].type != type)
5666                         continue;
5667                 if (info->ranges[i].controller != controller)
5668                         continue;
5669                 if (info->ranges[i].id_end < info->ranges[i].id_base) {
5670                         RTE_ETHDEV_LOG(WARNING, "Port %hu invalid representor ID range %u - %u, entry %d\n",
5671                                 ethdev->data->port_id, info->ranges[i].id_base,
5672                                 info->ranges[i].id_end, i);
5673                         continue;
5674
5675                 }
5676                 count = info->ranges[i].id_end - info->ranges[i].id_base + 1;
5677                 switch (info->ranges[i].type) {
5678                 case RTE_ETH_REPRESENTOR_PF:
5679                         if (pf < info->ranges[i].pf ||
5680                             pf >= info->ranges[i].pf + count)
5681                                 continue;
5682                         *repr_id = info->ranges[i].id_base +
5683                                    (pf - info->ranges[i].pf);
5684                         ret = 0;
5685                         goto out;
5686                 case RTE_ETH_REPRESENTOR_VF:
5687                         if (info->ranges[i].pf != pf)
5688                                 continue;
5689                         if (representor_port < info->ranges[i].vf ||
5690                             representor_port >= info->ranges[i].vf + count)
5691                                 continue;
5692                         *repr_id = info->ranges[i].id_base +
5693                                    (representor_port - info->ranges[i].vf);
5694                         ret = 0;
5695                         goto out;
5696                 case RTE_ETH_REPRESENTOR_SF:
5697                         if (info->ranges[i].pf != pf)
5698                                 continue;
5699                         if (representor_port < info->ranges[i].sf ||
5700                             representor_port >= info->ranges[i].sf + count)
5701                                 continue;
5702                         *repr_id = info->ranges[i].id_base +
5703                               (representor_port - info->ranges[i].sf);
5704                         ret = 0;
5705                         goto out;
5706                 default:
5707                         break;
5708                 }
5709         }
5710 out:
5711         free(info);
5712         return ret;
5713 }
5714
5715 static int
5716 eth_dev_handle_port_list(const char *cmd __rte_unused,
5717                 const char *params __rte_unused,
5718                 struct rte_tel_data *d)
5719 {
5720         int port_id;
5721
5722         rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
5723         RTE_ETH_FOREACH_DEV(port_id)
5724                 rte_tel_data_add_array_int(d, port_id);
5725         return 0;
5726 }
5727
5728 static void
5729 eth_dev_add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats,
5730                 const char *stat_name)
5731 {
5732         int q;
5733         struct rte_tel_data *q_data = rte_tel_data_alloc();
5734         rte_tel_data_start_array(q_data, RTE_TEL_U64_VAL);
5735         for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++)
5736                 rte_tel_data_add_array_u64(q_data, q_stats[q]);
5737         rte_tel_data_add_dict_container(d, stat_name, q_data, 0);
5738 }
5739
5740 #define ADD_DICT_STAT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s)
5741
5742 static int
5743 eth_dev_handle_port_stats(const char *cmd __rte_unused,
5744                 const char *params,
5745                 struct rte_tel_data *d)
5746 {
5747         struct rte_eth_stats stats;
5748         int port_id, ret;
5749
5750         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5751                 return -1;
5752
5753         port_id = atoi(params);
5754         if (!rte_eth_dev_is_valid_port(port_id))
5755                 return -1;
5756
5757         ret = rte_eth_stats_get(port_id, &stats);
5758         if (ret < 0)
5759                 return -1;
5760
5761         rte_tel_data_start_dict(d);
5762         ADD_DICT_STAT(stats, ipackets);
5763         ADD_DICT_STAT(stats, opackets);
5764         ADD_DICT_STAT(stats, ibytes);
5765         ADD_DICT_STAT(stats, obytes);
5766         ADD_DICT_STAT(stats, imissed);
5767         ADD_DICT_STAT(stats, ierrors);
5768         ADD_DICT_STAT(stats, oerrors);
5769         ADD_DICT_STAT(stats, rx_nombuf);
5770         eth_dev_add_port_queue_stats(d, stats.q_ipackets, "q_ipackets");
5771         eth_dev_add_port_queue_stats(d, stats.q_opackets, "q_opackets");
5772         eth_dev_add_port_queue_stats(d, stats.q_ibytes, "q_ibytes");
5773         eth_dev_add_port_queue_stats(d, stats.q_obytes, "q_obytes");
5774         eth_dev_add_port_queue_stats(d, stats.q_errors, "q_errors");
5775
5776         return 0;
5777 }
5778
5779 static int
5780 eth_dev_handle_port_xstats(const char *cmd __rte_unused,
5781                 const char *params,
5782                 struct rte_tel_data *d)
5783 {
5784         struct rte_eth_xstat *eth_xstats;
5785         struct rte_eth_xstat_name *xstat_names;
5786         int port_id, num_xstats;
5787         int i, ret;
5788         char *end_param;
5789
5790         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5791                 return -1;
5792
5793         port_id = strtoul(params, &end_param, 0);
5794         if (*end_param != '\0')
5795                 RTE_ETHDEV_LOG(NOTICE,
5796                         "Extra parameters passed to ethdev telemetry command, ignoring\n");
5797         if (!rte_eth_dev_is_valid_port(port_id))
5798                 return -1;
5799
5800         num_xstats = rte_eth_xstats_get(port_id, NULL, 0);
5801         if (num_xstats < 0)
5802                 return -1;
5803
5804         /* use one malloc for both names and stats */
5805         eth_xstats = malloc((sizeof(struct rte_eth_xstat) +
5806                         sizeof(struct rte_eth_xstat_name)) * num_xstats);
5807         if (eth_xstats == NULL)
5808                 return -1;
5809         xstat_names = (void *)&eth_xstats[num_xstats];
5810
5811         ret = rte_eth_xstats_get_names(port_id, xstat_names, num_xstats);
5812         if (ret < 0 || ret > num_xstats) {
5813                 free(eth_xstats);
5814                 return -1;
5815         }
5816
5817         ret = rte_eth_xstats_get(port_id, eth_xstats, num_xstats);
5818         if (ret < 0 || ret > num_xstats) {
5819                 free(eth_xstats);
5820                 return -1;
5821         }
5822
5823         rte_tel_data_start_dict(d);
5824         for (i = 0; i < num_xstats; i++)
5825                 rte_tel_data_add_dict_u64(d, xstat_names[i].name,
5826                                 eth_xstats[i].value);
5827         return 0;
5828 }
5829
5830 static int
5831 eth_dev_handle_port_link_status(const char *cmd __rte_unused,
5832                 const char *params,
5833                 struct rte_tel_data *d)
5834 {
5835         static const char *status_str = "status";
5836         int ret, port_id;
5837         struct rte_eth_link link;
5838         char *end_param;
5839
5840         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5841                 return -1;
5842
5843         port_id = strtoul(params, &end_param, 0);
5844         if (*end_param != '\0')
5845                 RTE_ETHDEV_LOG(NOTICE,
5846                         "Extra parameters passed to ethdev telemetry command, ignoring\n");
5847         if (!rte_eth_dev_is_valid_port(port_id))
5848                 return -1;
5849
5850         ret = rte_eth_link_get_nowait(port_id, &link);
5851         if (ret < 0)
5852                 return -1;
5853
5854         rte_tel_data_start_dict(d);
5855         if (!link.link_status) {
5856                 rte_tel_data_add_dict_string(d, status_str, "DOWN");
5857                 return 0;
5858         }
5859         rte_tel_data_add_dict_string(d, status_str, "UP");
5860         rte_tel_data_add_dict_u64(d, "speed", link.link_speed);
5861         rte_tel_data_add_dict_string(d, "duplex",
5862                         (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
5863                                 "full-duplex" : "half-duplex");
5864         return 0;
5865 }
5866
5867 int
5868 rte_eth_hairpin_queue_peer_update(uint16_t peer_port, uint16_t peer_queue,
5869                                   struct rte_hairpin_peer_info *cur_info,
5870                                   struct rte_hairpin_peer_info *peer_info,
5871                                   uint32_t direction)
5872 {
5873         struct rte_eth_dev *dev;
5874
5875         /* Current queue information is not mandatory; peer info must be given. */
5876         if (peer_info == NULL)
5877                 return -EINVAL;
5878
5879         /* No need to check the validity again. */
5880         dev = &rte_eth_devices[peer_port];
5881         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_update,
5882                                 -ENOTSUP);
5883
5884         return (*dev->dev_ops->hairpin_queue_peer_update)(dev, peer_queue,
5885                                         cur_info, peer_info, direction);
5886 }
5887
5888 int
5889 rte_eth_hairpin_queue_peer_bind(uint16_t cur_port, uint16_t cur_queue,
5890                                 struct rte_hairpin_peer_info *peer_info,
5891                                 uint32_t direction)
5892 {
5893         struct rte_eth_dev *dev;
5894
5895         if (peer_info == NULL)
5896                 return -EINVAL;
5897
5898         /* No need to check the validity again. */
5899         dev = &rte_eth_devices[cur_port];
5900         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_bind,
5901                                 -ENOTSUP);
5902
5903         return (*dev->dev_ops->hairpin_queue_peer_bind)(dev, cur_queue,
5904                                                         peer_info, direction);
5905 }
5906
5907 int
5908 rte_eth_hairpin_queue_peer_unbind(uint16_t cur_port, uint16_t cur_queue,
5909                                   uint32_t direction)
5910 {
5911         struct rte_eth_dev *dev;
5912
5913         /* No need to check the validity again. */
5914         dev = &rte_eth_devices[cur_port];
5915         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_unbind,
5916                                 -ENOTSUP);
5917
5918         return (*dev->dev_ops->hairpin_queue_peer_unbind)(dev, cur_queue,
5919                                                           direction);
5920 }
5921
5922 int
5923 rte_eth_representor_info_get(uint16_t port_id,
5924                              struct rte_eth_representor_info *info)
5925 {
5926         struct rte_eth_dev *dev;
5927
5928         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5929         dev = &rte_eth_devices[port_id];
5930
5931         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->representor_info_get, -ENOTSUP);
5932         return eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev,
5933                                                                       info));
5934 }
5935
5936 RTE_LOG_REGISTER(rte_eth_dev_logtype, lib.ethdev, INFO);
5937
5938 RTE_INIT(ethdev_init_telemetry)
5939 {
5940         rte_telemetry_register_cmd("/ethdev/list", eth_dev_handle_port_list,
5941                         "Returns list of available ethdev ports. Takes no parameters");
5942         rte_telemetry_register_cmd("/ethdev/stats", eth_dev_handle_port_stats,
5943                         "Returns the common stats for a port. Parameters: int port_id");
5944         rte_telemetry_register_cmd("/ethdev/xstats", eth_dev_handle_port_xstats,
5945                         "Returns the extended stats for a port. Parameters: int port_id");
5946         rte_telemetry_register_cmd("/ethdev/link_status",
5947                         eth_dev_handle_port_link_status,
5948                         "Returns the link status for a port. Parameters: int port_id");
5949 }