ethdev: add dev configured flag
[dpdk.git] lib/ethdev/rte_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4
5 #include <ctype.h>
6 #include <errno.h>
7 #include <inttypes.h>
8 #include <stdbool.h>
9 #include <stdint.h>
10 #include <stdlib.h>
11 #include <string.h>
12 #include <sys/queue.h>
13
14 #include <rte_byteorder.h>
15 #include <rte_log.h>
16 #include <rte_debug.h>
17 #include <rte_interrupts.h>
18 #include <rte_memory.h>
19 #include <rte_memcpy.h>
20 #include <rte_memzone.h>
21 #include <rte_launch.h>
22 #include <rte_eal.h>
23 #include <rte_per_lcore.h>
24 #include <rte_lcore.h>
25 #include <rte_branch_prediction.h>
26 #include <rte_common.h>
27 #include <rte_mempool.h>
28 #include <rte_malloc.h>
29 #include <rte_mbuf.h>
30 #include <rte_errno.h>
31 #include <rte_spinlock.h>
32 #include <rte_string_fns.h>
33 #include <rte_kvargs.h>
34 #include <rte_class.h>
35 #include <rte_ether.h>
36 #include <rte_telemetry.h>
37
38 #include "rte_ethdev_trace.h"
39 #include "rte_ethdev.h"
40 #include "ethdev_driver.h"
41 #include "ethdev_profile.h"
42 #include "ethdev_private.h"
43
44 static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
45 struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
46
47 /* spinlock for eth device callbacks */
48 static rte_spinlock_t eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
49
50 /* spinlock for add/remove rx callbacks */
51 static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;
52
53 /* spinlock for add/remove tx callbacks */
54 static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;
55
56 /* spinlock for shared data allocation */
57 static rte_spinlock_t eth_dev_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
58
59 /* store statistics names and their offsets in the stats structure */
60 struct rte_eth_xstats_name_off {
61         char name[RTE_ETH_XSTATS_NAME_SIZE];
62         unsigned offset;
63 };
64
65 /* Shared memory between primary and secondary processes. */
66 static struct {
67         uint64_t next_owner_id;
68         rte_spinlock_t ownership_lock;
69         struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
70 } *eth_dev_shared_data;
71
72 static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = {
73         {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
74         {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
75         {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
76         {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
77         {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
78         {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
79         {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
80         {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
81                 rx_nombuf)},
82 };
83
84 #define RTE_NB_STATS RTE_DIM(eth_dev_stats_strings)
85
86 static const struct rte_eth_xstats_name_off eth_dev_rxq_stats_strings[] = {
87         {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
88         {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
89         {"errors", offsetof(struct rte_eth_stats, q_errors)},
90 };
91
92 #define RTE_NB_RXQ_STATS RTE_DIM(eth_dev_rxq_stats_strings)
93
94 static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
95         {"packets", offsetof(struct rte_eth_stats, q_opackets)},
96         {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
97 };
98 #define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)
99
100 #define RTE_RX_OFFLOAD_BIT2STR(_name)   \
101         { DEV_RX_OFFLOAD_##_name, #_name }
102
103 #define RTE_ETH_RX_OFFLOAD_BIT2STR(_name)       \
104         { RTE_ETH_RX_OFFLOAD_##_name, #_name }
105
106 static const struct {
107         uint64_t offload;
108         const char *name;
109 } eth_dev_rx_offload_names[] = {
110         RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
111         RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
112         RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
113         RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
114         RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
115         RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
116         RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
117         RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
118         RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
119         RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
120         RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
121         RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
122         RTE_RX_OFFLOAD_BIT2STR(SCATTER),
123         RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
124         RTE_RX_OFFLOAD_BIT2STR(SECURITY),
125         RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
126         RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
127         RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
128         RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
129         RTE_ETH_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
130 };
131
132 #undef RTE_RX_OFFLOAD_BIT2STR
133 #undef RTE_ETH_RX_OFFLOAD_BIT2STR
134
135 #define RTE_TX_OFFLOAD_BIT2STR(_name)   \
136         { DEV_TX_OFFLOAD_##_name, #_name }
137
138 static const struct {
139         uint64_t offload;
140         const char *name;
141 } eth_dev_tx_offload_names[] = {
142         RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
143         RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
144         RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
145         RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
146         RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
147         RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
148         RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
149         RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
150         RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
151         RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
152         RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
153         RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
154         RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
155         RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
156         RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
157         RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
158         RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
159         RTE_TX_OFFLOAD_BIT2STR(SECURITY),
160         RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
161         RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
162         RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
163         RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
164 };
165
166 #undef RTE_TX_OFFLOAD_BIT2STR
167
168 /**
169  * The user application callback description.
170  *
171  * It contains the callback address to be registered by the user application,
172  * the pointer to the callback parameters, and the event type.
173  */
174 struct rte_eth_dev_callback {
175         TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
176         rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
177         void *cb_arg;                           /**< Parameter for callback */
178         void *ret_param;                        /**< Return parameter */
179         enum rte_eth_event_type event;          /**< Interrupt event type */
180         uint32_t active;                        /**< Callback is executing */
181 };
182
183 enum {
184         STAT_QMAP_TX = 0,
185         STAT_QMAP_RX
186 };
187
188 int
189 rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
190 {
191         int ret;
192         struct rte_devargs devargs;
193         const char *bus_param_key;
194         char *bus_str = NULL;
195         char *cls_str = NULL;
196         int str_size;
197
198         if (iter == NULL) {
199                 RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL iterator\n");
200                 return -EINVAL;
201         }
202
203         if (devargs_str == NULL) {
204                 RTE_ETHDEV_LOG(ERR,
205                         "Cannot initialize iterator from NULL device description string\n");
206                 return -EINVAL;
207         }
208
209         memset(iter, 0, sizeof(*iter));
210         memset(&devargs, 0, sizeof(devargs));
211
212         /*
213          * The devargs string may use various syntaxes:
214          *   - 0000:08:00.0,representor=[1-3]
215          *   - pci:0000:06:00.0,representor=[0,5]
216          *   - class=eth,mac=00:11:22:33:44:55
217          * A new syntax is in development (not yet supported):
218          *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
219          */
220
221         /*
222          * Handle pure class filter (i.e. without any bus-level argument),
223          * from future new syntax.
224          * rte_devargs_parse() does not yet support the new syntax,
225          * that's why this simple case is temporarily parsed here.
226          */
227 #define iter_anybus_str "class=eth,"
228         if (strncmp(devargs_str, iter_anybus_str,
229                         strlen(iter_anybus_str)) == 0) {
230                 iter->cls_str = devargs_str + strlen(iter_anybus_str);
231                 goto end;
232         }
233
234         /* Split bus, device and parameters. */
235         ret = rte_devargs_parse(&devargs, devargs_str);
236         if (ret != 0)
237                 goto error;
238
239         /*
240          * Assume parameters of old syntax can match only at ethdev level.
241          * Extra parameters will be ignored, thanks to the "+" prefix.
242          */
243         str_size = strlen(devargs.args) + 2;
244         cls_str = malloc(str_size);
245         if (cls_str == NULL) {
246                 ret = -ENOMEM;
247                 goto error;
248         }
249         ret = snprintf(cls_str, str_size, "+%s", devargs.args);
250         if (ret != str_size - 1) {
251                 ret = -EINVAL;
252                 goto error;
253         }
254         iter->cls_str = cls_str;
255
256         iter->bus = devargs.bus;
257         if (iter->bus->dev_iterate == NULL) {
258                 ret = -ENOTSUP;
259                 goto error;
260         }
261
262         /* Convert bus args to new syntax for use with new API dev_iterate. */
263         if ((strcmp(iter->bus->name, "vdev") == 0) ||
264                 (strcmp(iter->bus->name, "fslmc") == 0) ||
265                 (strcmp(iter->bus->name, "dpaa_bus") == 0)) {
266                 bus_param_key = "name";
267         } else if (strcmp(iter->bus->name, "pci") == 0) {
268                 bus_param_key = "addr";
269         } else {
270                 ret = -ENOTSUP;
271                 goto error;
272         }
273         str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
274         bus_str = malloc(str_size);
275         if (bus_str == NULL) {
276                 ret = -ENOMEM;
277                 goto error;
278         }
279         ret = snprintf(bus_str, str_size, "%s=%s",
280                         bus_param_key, devargs.name);
281         if (ret != str_size - 1) {
282                 ret = -EINVAL;
283                 goto error;
284         }
285         iter->bus_str = bus_str;
286
287 end:
288         iter->cls = rte_class_find_by_name("eth");
289         rte_devargs_reset(&devargs);
290         return 0;
291
292 error:
293         if (ret == -ENOTSUP)
294                 RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n",
295                                 iter->bus->name);
296         rte_devargs_reset(&devargs);
297         free(bus_str);
298         free(cls_str);
299         return ret;
300 }
301
302 uint16_t
303 rte_eth_iterator_next(struct rte_dev_iterator *iter)
304 {
305         if (iter == NULL) {
306                 RTE_ETHDEV_LOG(ERR,
307                         "Cannot get next device from NULL iterator\n");
308                 return RTE_MAX_ETHPORTS;
309         }
310
311         if (iter->cls == NULL) /* invalid ethdev iterator */
312                 return RTE_MAX_ETHPORTS;
313
314         do { /* loop to try all matching rte_device */
315                 /* If not pure ethdev filter and */
316                 if (iter->bus != NULL &&
317                                 /* not in middle of rte_eth_dev iteration, */
318                                 iter->class_device == NULL) {
319                         /* get next rte_device to try. */
320                         iter->device = iter->bus->dev_iterate(
321                                         iter->device, iter->bus_str, iter);
322                         if (iter->device == NULL)
323                                 break; /* no more rte_device candidate */
324                 }
325                 /* A device is matching bus part, need to check ethdev part. */
326                 iter->class_device = iter->cls->dev_iterate(
327                                 iter->class_device, iter->cls_str, iter);
328                 if (iter->class_device != NULL)
329                         return eth_dev_to_id(iter->class_device); /* match */
330         } while (iter->bus != NULL); /* need to try next rte_device */
331
332         /* No more ethdev port to iterate. */
333         rte_eth_iterator_cleanup(iter);
334         return RTE_MAX_ETHPORTS;
335 }
336
337 void
338 rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
339 {
340         if (iter == NULL) {
341                 RTE_ETHDEV_LOG(ERR, "Cannot clean up NULL iterator\n");
342                 return;
343         }
344
345         if (iter->bus_str == NULL)
346                 return; /* nothing to free in pure class filter */
347         free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
348         free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
349         memset(iter, 0, sizeof(*iter));
350 }
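
/*
 * Usage sketch (editorial illustration, not part of rte_ethdev.c): how an
 * application might walk every port matching a devargs filter with the
 * iterator API above.  The helper name example_list_matching_ports() is
 * hypothetical, and printf() assumes <stdio.h>.  The convenience macro
 * RTE_ETH_FOREACH_MATCHING_DEV() in rte_ethdev.h wraps this same loop.
 */
static void
example_list_matching_ports(const char *devargs_str)
{
        struct rte_dev_iterator iterator;
        uint16_t port_id;

        if (rte_eth_iterator_init(&iterator, devargs_str) != 0)
                return;
        /* rte_eth_iterator_next() returns RTE_MAX_ETHPORTS when the walk is
         * done and frees the iterator resources via the cleanup above. */
        for (port_id = rte_eth_iterator_next(&iterator);
             port_id < RTE_MAX_ETHPORTS;
             port_id = rte_eth_iterator_next(&iterator))
                printf("matched ethdev port %u\n", port_id);
}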
351
352 uint16_t
353 rte_eth_find_next(uint16_t port_id)
354 {
355         while (port_id < RTE_MAX_ETHPORTS &&
356                         rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
357                 port_id++;
358
359         if (port_id >= RTE_MAX_ETHPORTS)
360                 return RTE_MAX_ETHPORTS;
361
362         return port_id;
363 }
364
365 /*
366  * Macro to iterate over all valid ports for internal usage.
367  * Note: RTE_ETH_FOREACH_DEV is different because it filters out owned ports.
368  */
369 #define RTE_ETH_FOREACH_VALID_DEV(port_id) \
370         for (port_id = rte_eth_find_next(0); \
371              port_id < RTE_MAX_ETHPORTS; \
372              port_id = rte_eth_find_next(port_id + 1))
373
374 uint16_t
375 rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
376 {
377         port_id = rte_eth_find_next(port_id);
378         while (port_id < RTE_MAX_ETHPORTS &&
379                         rte_eth_devices[port_id].device != parent)
380                 port_id = rte_eth_find_next(port_id + 1);
381
382         return port_id;
383 }
384
385 uint16_t
386 rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
387 {
388         RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
389         return rte_eth_find_next_of(port_id,
390                         rte_eth_devices[ref_port_id].device);
391 }
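
/*
 * Editorial sketch (not part of rte_ethdev.c): the find_next helpers above
 * back iteration macros in rte_ethdev.h such as RTE_ETH_FOREACH_DEV_SIBLING,
 * which visits every port sharing the same rte_device as a reference port.
 * example_visit_siblings() is hypothetical; printf() assumes <stdio.h>.
 */
static void
example_visit_siblings(uint16_t ref_port_id)
{
        uint16_t sib;

        RTE_ETH_FOREACH_DEV_SIBLING(sib, ref_port_id)
                printf("port %u shares a device with port %u\n",
                       sib, ref_port_id);
}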
392
393 static void
394 eth_dev_shared_data_prepare(void)
395 {
396         const unsigned flags = 0;
397         const struct rte_memzone *mz;
398
399         rte_spinlock_lock(&eth_dev_shared_data_lock);
400
401         if (eth_dev_shared_data == NULL) {
402                 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
403                         /* Allocate port data and ownership shared memory. */
404                         mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
405                                         sizeof(*eth_dev_shared_data),
406                                         rte_socket_id(), flags);
407                 } else
408                         mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
409                 if (mz == NULL)
410                         rte_panic("Cannot allocate ethdev shared data\n");
411
412                 eth_dev_shared_data = mz->addr;
413                 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
414                         eth_dev_shared_data->next_owner_id =
415                                         RTE_ETH_DEV_NO_OWNER + 1;
416                         rte_spinlock_init(&eth_dev_shared_data->ownership_lock);
417                         memset(eth_dev_shared_data->data, 0,
418                                sizeof(eth_dev_shared_data->data));
419                 }
420         }
421
422         rte_spinlock_unlock(&eth_dev_shared_data_lock);
423 }
424
425 static bool
426 eth_dev_is_allocated(const struct rte_eth_dev *ethdev)
427 {
428         return ethdev->data->name[0] != '\0';
429 }
430
431 static struct rte_eth_dev *
432 eth_dev_allocated(const char *name)
433 {
434         uint16_t i;
435
436         RTE_BUILD_BUG_ON(RTE_MAX_ETHPORTS >= UINT16_MAX);
437
438         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
439                 if (rte_eth_devices[i].data != NULL &&
440                     strcmp(rte_eth_devices[i].data->name, name) == 0)
441                         return &rte_eth_devices[i];
442         }
443         return NULL;
444 }
445
446 struct rte_eth_dev *
447 rte_eth_dev_allocated(const char *name)
448 {
449         struct rte_eth_dev *ethdev;
450
451         eth_dev_shared_data_prepare();
452
453         rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
454
455         ethdev = eth_dev_allocated(name);
456
457         rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
458
459         return ethdev;
460 }
461
462 static uint16_t
463 eth_dev_find_free_port(void)
464 {
465         uint16_t i;
466
467         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
468                 /* Using shared name field to find a free port. */
469                 if (eth_dev_shared_data->data[i].name[0] == '\0') {
470                         RTE_ASSERT(rte_eth_devices[i].state ==
471                                    RTE_ETH_DEV_UNUSED);
472                         return i;
473                 }
474         }
475         return RTE_MAX_ETHPORTS;
476 }
477
478 static struct rte_eth_dev *
479 eth_dev_get(uint16_t port_id)
480 {
481         struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
482
483         eth_dev->data = &eth_dev_shared_data->data[port_id];
484
485         return eth_dev;
486 }
487
488 struct rte_eth_dev *
489 rte_eth_dev_allocate(const char *name)
490 {
491         uint16_t port_id;
492         struct rte_eth_dev *eth_dev = NULL;
493         size_t name_len;
494
495         name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
496         if (name_len == 0) {
497                 RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n");
498                 return NULL;
499         }
500
501         if (name_len >= RTE_ETH_NAME_MAX_LEN) {
502                 RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n");
503                 return NULL;
504         }
505
506         eth_dev_shared_data_prepare();
507
508         /* Synchronize port creation between primary and secondary processes. */
509         rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
510
511         if (eth_dev_allocated(name) != NULL) {
512                 RTE_ETHDEV_LOG(ERR,
513                         "Ethernet device with name %s already allocated\n",
514                         name);
515                 goto unlock;
516         }
517
518         port_id = eth_dev_find_free_port();
519         if (port_id == RTE_MAX_ETHPORTS) {
520                 RTE_ETHDEV_LOG(ERR,
521                         "Reached maximum number of Ethernet ports\n");
522                 goto unlock;
523         }
524
525         eth_dev = eth_dev_get(port_id);
526         strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
527         eth_dev->data->port_id = port_id;
528         eth_dev->data->mtu = RTE_ETHER_MTU;
529         pthread_mutex_init(&eth_dev->data->flow_ops_mutex, NULL);
530
531 unlock:
532         rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
533
534         return eth_dev;
535 }
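
/*
 * Editorial sketch (not part of rte_ethdev.c) of the driver-side allocation
 * flow as a PMD probe function would use it.  struct example_priv and
 * example_create_port() are hypothetical; rte_eth_dev_probing_finish() is
 * the ethdev driver API that marks the new port as ATTACHED.
 */
struct example_priv { int dummy; };     /* hypothetical per-port state */

static int
example_create_port(void)
{
        struct rte_eth_dev *eth_dev;

        eth_dev = rte_eth_dev_allocate("net_example0");
        if (eth_dev == NULL)
                return -ENOMEM;
        eth_dev->data->dev_private = rte_zmalloc("example_priv",
                        sizeof(struct example_priv), RTE_CACHE_LINE_SIZE);
        if (eth_dev->data->dev_private == NULL) {
                rte_eth_dev_release_port(eth_dev);
                return -ENOMEM;
        }
        /* ... set eth_dev->dev_ops, MAC address storage, etc. ... */
        rte_eth_dev_probing_finish(eth_dev);
        return 0;
}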
536
537 /*
538  * Attach to a port already registered by the primary process, which
539  * ensures that the same device gets the same port ID in both the
540  * primary and secondary processes.
541  */
542 struct rte_eth_dev *
543 rte_eth_dev_attach_secondary(const char *name)
544 {
545         uint16_t i;
546         struct rte_eth_dev *eth_dev = NULL;
547
548         eth_dev_shared_data_prepare();
549
550         /* Synchronize port attachment to primary port creation and release. */
551         rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
552
553         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
554                 if (strcmp(eth_dev_shared_data->data[i].name, name) == 0)
555                         break;
556         }
557         if (i == RTE_MAX_ETHPORTS) {
558                 RTE_ETHDEV_LOG(ERR,
559                         "Device %s is not driven by the primary process\n",
560                         name);
561         } else {
562                 eth_dev = eth_dev_get(i);
563                 RTE_ASSERT(eth_dev->data->port_id == i);
564         }
565
566         rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
567         return eth_dev;
568 }
569
570 int
571 rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
572 {
573         if (eth_dev == NULL)
574                 return -EINVAL;
575
576         eth_dev_shared_data_prepare();
577
578         if (eth_dev->state != RTE_ETH_DEV_UNUSED)
579                 rte_eth_dev_callback_process(eth_dev,
580                                 RTE_ETH_EVENT_DESTROY, NULL);
581
582         rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
583
584         eth_dev->state = RTE_ETH_DEV_UNUSED;
585         eth_dev->device = NULL;
586         eth_dev->process_private = NULL;
587         eth_dev->intr_handle = NULL;
588         eth_dev->rx_pkt_burst = NULL;
589         eth_dev->tx_pkt_burst = NULL;
590         eth_dev->tx_pkt_prepare = NULL;
591         eth_dev->rx_queue_count = NULL;
592         eth_dev->rx_descriptor_done = NULL;
593         eth_dev->rx_descriptor_status = NULL;
594         eth_dev->tx_descriptor_status = NULL;
595         eth_dev->dev_ops = NULL;
596
597         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
598                 rte_free(eth_dev->data->rx_queues);
599                 rte_free(eth_dev->data->tx_queues);
600                 rte_free(eth_dev->data->mac_addrs);
601                 rte_free(eth_dev->data->hash_mac_addrs);
602                 rte_free(eth_dev->data->dev_private);
603                 pthread_mutex_destroy(&eth_dev->data->flow_ops_mutex);
604                 memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
605         }
606
607         rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
608
609         return 0;
610 }
611
612 int
613 rte_eth_dev_is_valid_port(uint16_t port_id)
614 {
615         if (port_id >= RTE_MAX_ETHPORTS ||
616             (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
617                 return 0;
618         else
619                 return 1;
620 }
621
622 static int
623 eth_is_valid_owner_id(uint64_t owner_id)
624 {
625         if (owner_id == RTE_ETH_DEV_NO_OWNER ||
626             eth_dev_shared_data->next_owner_id <= owner_id)
627                 return 0;
628         return 1;
629 }
630
631 uint64_t
632 rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
633 {
634         port_id = rte_eth_find_next(port_id);
635         while (port_id < RTE_MAX_ETHPORTS &&
636                         rte_eth_devices[port_id].data->owner.id != owner_id)
637                 port_id = rte_eth_find_next(port_id + 1);
638
639         return port_id;
640 }
641
642 int
643 rte_eth_dev_owner_new(uint64_t *owner_id)
644 {
645         if (owner_id == NULL) {
646                 RTE_ETHDEV_LOG(ERR, "Cannot get new owner ID to NULL\n");
647                 return -EINVAL;
648         }
649
650         eth_dev_shared_data_prepare();
651
652         rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
653
654         *owner_id = eth_dev_shared_data->next_owner_id++;
655
656         rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
657         return 0;
658 }
659
660 static int
661 eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
662                        const struct rte_eth_dev_owner *new_owner)
663 {
664         struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
665         struct rte_eth_dev_owner *port_owner;
666
667         if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
668                 RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
669                         port_id);
670                 return -ENODEV;
671         }
672
673         if (new_owner == NULL) {
674                 RTE_ETHDEV_LOG(ERR,
675                         "Cannot set ethdev port %u owner from NULL owner\n",
676                         port_id);
677                 return -EINVAL;
678         }
679
680         if (!eth_is_valid_owner_id(new_owner->id) &&
681             !eth_is_valid_owner_id(old_owner_id)) {
682                 RTE_ETHDEV_LOG(ERR,
683                         "Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
684                        old_owner_id, new_owner->id);
685                 return -EINVAL;
686         }
687
688         port_owner = &rte_eth_devices[port_id].data->owner;
689         if (port_owner->id != old_owner_id) {
690                 RTE_ETHDEV_LOG(ERR,
691                         "Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
692                         port_id, port_owner->name, port_owner->id);
693                 return -EPERM;
694         }
695
696         /* cannot truncate (both name fields have the same size) */
697         strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);
698
699         port_owner->id = new_owner->id;
700
701         RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
702                 port_id, new_owner->name, new_owner->id);
703
704         return 0;
705 }
706
707 int
708 rte_eth_dev_owner_set(const uint16_t port_id,
709                       const struct rte_eth_dev_owner *owner)
710 {
711         int ret;
712
713         eth_dev_shared_data_prepare();
714
715         rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
716
717         ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);
718
719         rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
720         return ret;
721 }
722
723 int
724 rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
725 {
726         const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
727                         {.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
728         int ret;
729
730         eth_dev_shared_data_prepare();
731
732         rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
733
734         ret = eth_dev_owner_set(port_id, owner_id, &new_owner);
735
736         rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
737         return ret;
738 }
739
740 int
741 rte_eth_dev_owner_delete(const uint64_t owner_id)
742 {
743         uint16_t port_id;
744         int ret = 0;
745
746         eth_dev_shared_data_prepare();
747
748         rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
749
750         if (eth_is_valid_owner_id(owner_id)) {
751                 for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
752                         if (rte_eth_devices[port_id].data->owner.id == owner_id)
753                                 memset(&rte_eth_devices[port_id].data->owner, 0,
754                                        sizeof(struct rte_eth_dev_owner));
755                 RTE_ETHDEV_LOG(NOTICE,
756                         "All port owners owned by %016"PRIx64" identifier have been removed\n",
757                         owner_id);
758         } else {
759                 RTE_ETHDEV_LOG(ERR,
760                                "Invalid owner id=%016"PRIx64"\n",
761                                owner_id);
762                 ret = -EINVAL;
763         }
764
765         rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
766
767         return ret;
768 }
769
770 int
771 rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
772 {
773         struct rte_eth_dev *ethdev;
774
775         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
776         ethdev = &rte_eth_devices[port_id];
777
778         if (!eth_dev_is_allocated(ethdev)) {
779                 RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
780                         port_id);
781                 return -ENODEV;
782         }
783
784         if (owner == NULL) {
785                 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u owner to NULL\n",
786                         port_id);
787                 return -EINVAL;
788         }
789
790         eth_dev_shared_data_prepare();
791
792         rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
793         rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
794         rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
795
796         return 0;
797 }
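
/*
 * Usage sketch (editorial, not part of rte_ethdev.c): taking ownership of a
 * port so that generic iterators skip it.  example_take_ownership() is
 * hypothetical; the rte_eth_dev_owner_*() calls and the
 * RTE_ETH_FOREACH_DEV_OWNED_BY() macro come from the API above and
 * rte_ethdev.h, and printf() assumes <stdio.h>.
 */
static void
example_take_ownership(uint16_t port_id)
{
        struct rte_eth_dev_owner owner = { .name = "example_app" };
        uint16_t p;

        if (rte_eth_dev_owner_new(&owner.id) != 0 ||
            rte_eth_dev_owner_set(port_id, &owner) != 0)
                return;
        /* Owned ports are hidden from RTE_ETH_FOREACH_DEV but can still be
         * enumerated explicitly by owner id. */
        RTE_ETH_FOREACH_DEV_OWNED_BY(p, owner.id)
                printf("port %u is owned by %s\n", p, owner.name);
        rte_eth_dev_owner_unset(port_id, owner.id);
}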
798
799 int
800 rte_eth_dev_socket_id(uint16_t port_id)
801 {
802         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
803         return rte_eth_devices[port_id].data->numa_node;
804 }
805
806 void *
807 rte_eth_dev_get_sec_ctx(uint16_t port_id)
808 {
809         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
810         return rte_eth_devices[port_id].security_ctx;
811 }
812
813 uint16_t
814 rte_eth_dev_count_avail(void)
815 {
816         uint16_t p;
817         uint16_t count;
818
819         count = 0;
820
821         RTE_ETH_FOREACH_DEV(p)
822                 count++;
823
824         return count;
825 }
826
827 uint16_t
828 rte_eth_dev_count_total(void)
829 {
830         uint16_t port, count = 0;
831
832         RTE_ETH_FOREACH_VALID_DEV(port)
833                 count++;
834
835         return count;
836 }
837
838 int
839 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
840 {
841         char *tmp;
842
843         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
844
845         if (name == NULL) {
846                 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u name to NULL\n",
847                         port_id);
848                 return -EINVAL;
849         }
850
851         /* Don't check 'rte_eth_devices[i].data' here,
852          * because it might be overwritten by a VDEV PMD */
853         tmp = eth_dev_shared_data->data[port_id].name;
854         strcpy(name, tmp);
855         return 0;
856 }
857
858 int
859 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
860 {
861         uint16_t pid;
862
863         if (name == NULL) {
864                 RTE_ETHDEV_LOG(ERR, "Cannot get port ID from NULL name\n");
865                 return -EINVAL;
866         }
867
868         if (port_id == NULL) {
869                 RTE_ETHDEV_LOG(ERR,
870                         "Cannot get port ID to NULL for %s\n", name);
871                 return -EINVAL;
872         }
873
874         RTE_ETH_FOREACH_VALID_DEV(pid)
875                 if (!strcmp(name, eth_dev_shared_data->data[pid].name)) {
876                         *port_id = pid;
877                         return 0;
878                 }
879
880         return -ENODEV;
881 }
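
/*
 * Editorial sketch (not part of rte_ethdev.c): the two lookup directions,
 * name to port ID and back.  example_lookup() is hypothetical, the PCI
 * address is an arbitrary example, and printf() assumes <stdio.h>.
 */
static void
example_lookup(void)
{
        char name[RTE_ETH_NAME_MAX_LEN];
        uint16_t port_id;

        if (rte_eth_dev_get_port_by_name("0000:08:00.0", &port_id) == 0 &&
            rte_eth_dev_get_name_by_port(port_id, name) == 0)
                printf("port %u <-> name %s\n", port_id, name);
}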
882
883 static int
884 eth_err(uint16_t port_id, int ret)
885 {
886         if (ret == 0)
887                 return 0;
888         if (rte_eth_dev_is_removed(port_id))
889                 return -EIO;
890         return ret;
891 }
892
893 static int
894 eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
895 {
896         uint16_t old_nb_queues = dev->data->nb_rx_queues;
897         void **rxq;
898         unsigned i;
899
900         if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
901                 dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
902                                 sizeof(dev->data->rx_queues[0]) * nb_queues,
903                                 RTE_CACHE_LINE_SIZE);
904                 if (dev->data->rx_queues == NULL) {
905                         dev->data->nb_rx_queues = 0;
906                         return -(ENOMEM);
907                 }
908         } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
909                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
910
911                 rxq = dev->data->rx_queues;
912
913                 for (i = nb_queues; i < old_nb_queues; i++)
914                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
915                 rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
916                                 RTE_CACHE_LINE_SIZE);
917                 if (rxq == NULL)
918                         return -(ENOMEM);
919                 if (nb_queues > old_nb_queues) {
920                         uint16_t new_qs = nb_queues - old_nb_queues;
921
922                         memset(rxq + old_nb_queues, 0,
923                                 sizeof(rxq[0]) * new_qs);
924                 }
925
926                 dev->data->rx_queues = rxq;
927
928         } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
929                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
930
931                 rxq = dev->data->rx_queues;
932
933                 for (i = nb_queues; i < old_nb_queues; i++)
934                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
935
936                 rte_free(dev->data->rx_queues);
937                 dev->data->rx_queues = NULL;
938         }
939         dev->data->nb_rx_queues = nb_queues;
940         return 0;
941 }
942
943 static int
944 eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
945 {
946         uint16_t port_id;
947
948         if (rx_queue_id >= dev->data->nb_rx_queues) {
949                 port_id = dev->data->port_id;
950                 RTE_ETHDEV_LOG(ERR,
951                                "Invalid Rx queue_id=%u of device with port_id=%u\n",
952                                rx_queue_id, port_id);
953                 return -EINVAL;
954         }
955
956         if (dev->data->rx_queues[rx_queue_id] == NULL) {
957                 port_id = dev->data->port_id;
958                 RTE_ETHDEV_LOG(ERR,
959                                "Queue %u of device with port_id=%u has not been setup\n",
960                                rx_queue_id, port_id);
961                 return -EINVAL;
962         }
963
964         return 0;
965 }
966
967 static int
968 eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id)
969 {
970         uint16_t port_id;
971
972         if (tx_queue_id >= dev->data->nb_tx_queues) {
973                 port_id = dev->data->port_id;
974                 RTE_ETHDEV_LOG(ERR,
975                                "Invalid Tx queue_id=%u of device with port_id=%u\n",
976                                tx_queue_id, port_id);
977                 return -EINVAL;
978         }
979
980         if (dev->data->tx_queues[tx_queue_id] == NULL) {
981                 port_id = dev->data->port_id;
982                 RTE_ETHDEV_LOG(ERR,
983                                "Queue %u of device with port_id=%u has not been setup\n",
984                                tx_queue_id, port_id);
985                 return -EINVAL;
986         }
987
988         return 0;
989 }
990
991 int
992 rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
993 {
994         struct rte_eth_dev *dev;
995         int ret;
996
997         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
998         dev = &rte_eth_devices[port_id];
999
1000         if (!dev->data->dev_started) {
1001                 RTE_ETHDEV_LOG(ERR,
1002                         "Port %u must be started before starting any queue\n",
1003                         port_id);
1004                 return -EINVAL;
1005         }
1006
1007         ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
1008         if (ret != 0)
1009                 return ret;
1010
1011         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
1012
1013         if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
1014                 RTE_ETHDEV_LOG(INFO,
1015                         "Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
1016                         rx_queue_id, port_id);
1017                 return -EINVAL;
1018         }
1019
1020         if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
1021                 RTE_ETHDEV_LOG(INFO,
1022                         "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
1023                         rx_queue_id, port_id);
1024                 return 0;
1025         }
1026
1027         return eth_err(port_id, dev->dev_ops->rx_queue_start(dev, rx_queue_id));
1028 }
1029
1030 int
1031 rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
1032 {
1033         struct rte_eth_dev *dev;
1034         int ret;
1035
1036         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1037         dev = &rte_eth_devices[port_id];
1038
1039         ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
1040         if (ret != 0)
1041                 return ret;
1042
1043         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
1044
1045         if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
1046                 RTE_ETHDEV_LOG(INFO,
1047                         "Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
1048                         rx_queue_id, port_id);
1049                 return -EINVAL;
1050         }
1051
1052         if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
1053                 RTE_ETHDEV_LOG(INFO,
1054                         "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
1055                         rx_queue_id, port_id);
1056                 return 0;
1057         }
1058
1059         return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
1060 }
1061
1062 int
1063 rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
1064 {
1065         struct rte_eth_dev *dev;
1066         int ret;
1067
1068         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1069         dev = &rte_eth_devices[port_id];
1070
1071         if (!dev->data->dev_started) {
1072                 RTE_ETHDEV_LOG(ERR,
1073                         "Port %u must be started before starting any queue\n",
1074                         port_id);
1075                 return -EINVAL;
1076         }
1077
1078         ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
1079         if (ret != 0)
1080                 return ret;
1081
1082         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
1083
1084         if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
1085                 RTE_ETHDEV_LOG(INFO,
1086                         "Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
1087                         tx_queue_id, port_id);
1088                 return -EINVAL;
1089         }
1090
1091         if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
1092                 RTE_ETHDEV_LOG(INFO,
1093                         "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
1094                         tx_queue_id, port_id);
1095                 return 0;
1096         }
1097
1098         return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
1099 }
1100
1101 int
1102 rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
1103 {
1104         struct rte_eth_dev *dev;
1105         int ret;
1106
1107         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1108         dev = &rte_eth_devices[port_id];
1109
1110         ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
1111         if (ret != 0)
1112                 return ret;
1113
1114         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
1115
1116         if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
1117                 RTE_ETHDEV_LOG(INFO,
1118                         "Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
1119                         tx_queue_id, port_id);
1120                 return -EINVAL;
1121         }
1122
1123         if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
1124                 RTE_ETHDEV_LOG(INFO,
1125                         "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
1126                         tx_queue_id, port_id);
1127                 return 0;
1128         }
1129
1130         return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
1131 }
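
/*
 * Usage sketch (editorial, not part of rte_ethdev.c): the per-queue
 * start/stop API above is typically paired with a deferred-start queue
 * configuration, so a queue set up before rte_eth_dev_start() stays stopped
 * until the application starts it explicitly.  example_deferred_rx_start()
 * is hypothetical and assumes the port was already configured via
 * rte_eth_dev_configure().
 */
static int
example_deferred_rx_start(uint16_t port_id, uint16_t queue_id,
                uint16_t nb_desc, struct rte_mempool *mp)
{
        struct rte_eth_rxconf rxconf = { .rx_deferred_start = 1 };
        int ret;

        ret = rte_eth_rx_queue_setup(port_id, queue_id, nb_desc,
                        rte_eth_dev_socket_id(port_id), &rxconf, mp);
        if (ret != 0)
                return ret;
        ret = rte_eth_dev_start(port_id); /* deferred queue stays stopped */
        if (ret != 0)
                return ret;
        return rte_eth_dev_rx_queue_start(port_id, queue_id);
}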
1132
1133 static int
1134 eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
1135 {
1136         uint16_t old_nb_queues = dev->data->nb_tx_queues;
1137         void **txq;
1138         unsigned i;
1139
1140         if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
1141                 dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
1142                                                    sizeof(dev->data->tx_queues[0]) * nb_queues,
1143                                                    RTE_CACHE_LINE_SIZE);
1144                 if (dev->data->tx_queues == NULL) {
1145                         dev->data->nb_tx_queues = 0;
1146                         return -(ENOMEM);
1147                 }
1148         } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
1149                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
1150
1151                 txq = dev->data->tx_queues;
1152
1153                 for (i = nb_queues; i < old_nb_queues; i++)
1154                         (*dev->dev_ops->tx_queue_release)(txq[i]);
1155                 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
1156                                   RTE_CACHE_LINE_SIZE);
1157                 if (txq == NULL)
1158                         return -ENOMEM;
1159                 if (nb_queues > old_nb_queues) {
1160                         uint16_t new_qs = nb_queues - old_nb_queues;
1161
1162                         memset(txq + old_nb_queues, 0,
1163                                sizeof(txq[0]) * new_qs);
1164                 }
1165
1166                 dev->data->tx_queues = txq;
1167
1168         } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
1169                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
1170
1171                 txq = dev->data->tx_queues;
1172
1173                 for (i = nb_queues; i < old_nb_queues; i++)
1174                         (*dev->dev_ops->tx_queue_release)(txq[i]);
1175
1176                 rte_free(dev->data->tx_queues);
1177                 dev->data->tx_queues = NULL;
1178         }
1179         dev->data->nb_tx_queues = nb_queues;
1180         return 0;
1181 }
1182
1183 uint32_t
1184 rte_eth_speed_bitflag(uint32_t speed, int duplex)
1185 {
1186         switch (speed) {
1187         case ETH_SPEED_NUM_10M:
1188                 return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
1189         case ETH_SPEED_NUM_100M:
1190                 return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
1191         case ETH_SPEED_NUM_1G:
1192                 return ETH_LINK_SPEED_1G;
1193         case ETH_SPEED_NUM_2_5G:
1194                 return ETH_LINK_SPEED_2_5G;
1195         case ETH_SPEED_NUM_5G:
1196                 return ETH_LINK_SPEED_5G;
1197         case ETH_SPEED_NUM_10G:
1198                 return ETH_LINK_SPEED_10G;
1199         case ETH_SPEED_NUM_20G:
1200                 return ETH_LINK_SPEED_20G;
1201         case ETH_SPEED_NUM_25G:
1202                 return ETH_LINK_SPEED_25G;
1203         case ETH_SPEED_NUM_40G:
1204                 return ETH_LINK_SPEED_40G;
1205         case ETH_SPEED_NUM_50G:
1206                 return ETH_LINK_SPEED_50G;
1207         case ETH_SPEED_NUM_56G:
1208                 return ETH_LINK_SPEED_56G;
1209         case ETH_SPEED_NUM_100G:
1210                 return ETH_LINK_SPEED_100G;
1211         case ETH_SPEED_NUM_200G:
1212                 return ETH_LINK_SPEED_200G;
1213         default:
1214                 return 0;
1215         }
1216 }
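
/*
 * Usage sketch (editorial, not part of rte_ethdev.c): building the
 * link_speeds field of struct rte_eth_conf from a numeric speed.  OR-ing in
 * ETH_LINK_SPEED_FIXED requests that fixed speed instead of autonegotiation.
 * example_fix_link_speed() is hypothetical.
 */
static void
example_fix_link_speed(struct rte_eth_conf *conf)
{
        conf->link_speeds = ETH_LINK_SPEED_FIXED |
                        rte_eth_speed_bitflag(ETH_SPEED_NUM_10G,
                                        ETH_LINK_FULL_DUPLEX);
}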
1217
1218 const char *
1219 rte_eth_dev_rx_offload_name(uint64_t offload)
1220 {
1221         const char *name = "UNKNOWN";
1222         unsigned int i;
1223
1224         for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i) {
1225                 if (offload == eth_dev_rx_offload_names[i].offload) {
1226                         name = eth_dev_rx_offload_names[i].name;
1227                         break;
1228                 }
1229         }
1230
1231         return name;
1232 }
1233
1234 const char *
1235 rte_eth_dev_tx_offload_name(uint64_t offload)
1236 {
1237         const char *name = "UNKNOWN";
1238         unsigned int i;
1239
1240         for (i = 0; i < RTE_DIM(eth_dev_tx_offload_names); ++i) {
1241                 if (offload == eth_dev_tx_offload_names[i].offload) {
1242                         name = eth_dev_tx_offload_names[i].name;
1243                         break;
1244                 }
1245         }
1246
1247         return name;
1248 }
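
/*
 * Usage sketch (editorial, not part of rte_ethdev.c): decoding an offload
 * capability mask bit by bit with the name helpers above, mirroring the
 * __builtin_ctzll() walk used by eth_dev_validate_offloads() below.
 * example_print_rx_offloads() is hypothetical; printf() assumes <stdio.h>.
 */
static void
example_print_rx_offloads(uint64_t rx_offload_capa)
{
        while (rx_offload_capa != 0) {
                uint64_t bit = 1ULL << __builtin_ctzll(rx_offload_capa);

                printf("%s\n", rte_eth_dev_rx_offload_name(bit));
                rx_offload_capa &= ~bit;
        }
}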
1249
1250 static inline int
1251 eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
1252                    uint32_t max_rx_pkt_len, uint32_t dev_info_size)
1253 {
1254         int ret = 0;
1255
1256         if (dev_info_size == 0) {
1257                 if (config_size != max_rx_pkt_len) {
1258                         RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
1259                                        " %u != %u is not allowed\n",
1260                                        port_id, config_size, max_rx_pkt_len);
1261                         ret = -EINVAL;
1262                 }
1263         } else if (config_size > dev_info_size) {
1264                 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
1265                                "> max allowed value %u\n", port_id, config_size,
1266                                dev_info_size);
1267                 ret = -EINVAL;
1268         } else if (config_size < RTE_ETHER_MIN_LEN) {
1269                 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
1270                                "< min allowed value %u\n", port_id, config_size,
1271                                (unsigned int)RTE_ETHER_MIN_LEN);
1272                 ret = -EINVAL;
1273         }
1274         return ret;
1275 }
1276
1277 /*
1278  * Validate offloads that are requested through rte_eth_dev_configure against
1279  * the offloads successfully set by the ethernet device.
1280  *
1281  * @param port_id
1282  *   The port identifier of the Ethernet device.
1283  * @param req_offloads
1284  *   The offloads that have been requested through `rte_eth_dev_configure`.
1285  * @param set_offloads
1286  *   The offloads successfully set by the ethernet device.
1287  * @param offload_type
1288  *   The offload type, i.e. the Rx/Tx string.
1289  * @param offload_name
1290  *   The function that prints the offload name.
1291  * @return
1292  *   - (0) if validation successful.
1293  *   - (-EINVAL) if requested offload has been silently disabled.
1294  *
1295  */
1296 static int
1297 eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads,
1298                   uint64_t set_offloads, const char *offload_type,
1299                   const char *(*offload_name)(uint64_t))
1300 {
1301         uint64_t offloads_diff = req_offloads ^ set_offloads;
1302         uint64_t offload;
1303         int ret = 0;
1304
1305         while (offloads_diff != 0) {
1306                 /* Check if any offload is requested but not enabled. */
1307                 offload = 1ULL << __builtin_ctzll(offloads_diff);
1308                 if (offload & req_offloads) {
1309                         RTE_ETHDEV_LOG(ERR,
1310                                 "Port %u failed to enable %s offload %s\n",
1311                                 port_id, offload_type, offload_name(offload));
1312                         ret = -EINVAL;
1313                 }
1314
1315                 /* Check if offload couldn't be disabled. */
1316                 if (offload & set_offloads) {
1317                         RTE_ETHDEV_LOG(DEBUG,
1318                                 "Port %u %s offload %s is not requested but enabled\n",
1319                                 port_id, offload_type, offload_name(offload));
1320                 }
1321
1322                 offloads_diff &= ~offload;
1323         }
1324
1325         return ret;
1326 }
1327
1328 int
1329 rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
1330                       const struct rte_eth_conf *dev_conf)
1331 {
1332         struct rte_eth_dev *dev;
1333         struct rte_eth_dev_info dev_info;
1334         struct rte_eth_conf orig_conf;
1335         uint16_t overhead_len;
1336         int diag;
1337         int ret;
1338         uint16_t old_mtu;
1339
1340         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1341         dev = &rte_eth_devices[port_id];
1342
1343         if (dev_conf == NULL) {
1344                 RTE_ETHDEV_LOG(ERR,
1345                         "Cannot configure ethdev port %u from NULL config\n",
1346                         port_id);
1347                 return -EINVAL;
1348         }
1349
1350         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
1351
1352         if (dev->data->dev_started) {
1353                 RTE_ETHDEV_LOG(ERR,
1354                         "Port %u must be stopped to allow configuration\n",
1355                         port_id);
1356                 return -EBUSY;
1357         }
1358
1359         /*
1360          * Ensure that "dev_configured" is always 0 each time we prepare to
1361          * call dev_configure(), to avoid any unexpected behaviour, and set
1362          * it to 1 only when dev_configure() completes successfully.
1363          */
1364         dev->data->dev_configured = 0;
1365
1366         /* Store the original config, as a rollback is required on failure */
1367         memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));
1368
1369         /*
1370          * Copy the dev_conf parameter into the dev structure.
1371          * rte_eth_dev_info_get() requires dev_conf; copy it before getting dev_info.
1372          */
1373         if (dev_conf != &dev->data->dev_conf)
1374                 memcpy(&dev->data->dev_conf, dev_conf,
1375                        sizeof(dev->data->dev_conf));
1376
1377         /* Backup mtu for rollback */
1378         old_mtu = dev->data->mtu;
1379
1380         ret = rte_eth_dev_info_get(port_id, &dev_info);
1381         if (ret != 0)
1382                 goto rollback;
1383
1384         /* Get the real Ethernet overhead length */
1385         if (dev_info.max_mtu != UINT16_MAX &&
1386             dev_info.max_rx_pktlen > dev_info.max_mtu)
1387                 overhead_len = dev_info.max_rx_pktlen - dev_info.max_mtu;
1388         else
1389                 overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
1390
1391         /* If number of queues specified by application for both Rx and Tx is
1392          * zero, use driver preferred values. This cannot be done individually
1393          * as it is valid for either Tx or Rx (but not both) to be zero.
1394          * If driver does not provide any preferred values, fall back on
1395          * EAL defaults.
1396          */
1397         if (nb_rx_q == 0 && nb_tx_q == 0) {
1398                 nb_rx_q = dev_info.default_rxportconf.nb_queues;
1399                 if (nb_rx_q == 0)
1400                         nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
1401                 nb_tx_q = dev_info.default_txportconf.nb_queues;
1402                 if (nb_tx_q == 0)
1403                         nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
1404         }
1405
1406         if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
1407                 RTE_ETHDEV_LOG(ERR,
1408                         "Number of RX queues requested (%u) is greater than max supported(%d)\n",
1409                         nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
1410                 ret = -EINVAL;
1411                 goto rollback;
1412         }
1413
1414         if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
1415                 RTE_ETHDEV_LOG(ERR,
1416                         "Number of TX queues requested (%u) is greater than max supported(%d)\n",
1417                         nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
1418                 ret = -EINVAL;
1419                 goto rollback;
1420         }
1421
1422         /*
1423          * Check that the numbers of RX and TX queues are not greater
1424          * than the maximum number of RX and TX queues supported by the
1425          * configured device.
1426          */
1427         if (nb_rx_q > dev_info.max_rx_queues) {
1428                 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
1429                         port_id, nb_rx_q, dev_info.max_rx_queues);
1430                 ret = -EINVAL;
1431                 goto rollback;
1432         }
1433
1434         if (nb_tx_q > dev_info.max_tx_queues) {
1435                 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
1436                         port_id, nb_tx_q, dev_info.max_tx_queues);
1437                 ret = -EINVAL;
1438                 goto rollback;
1439         }
1440
1441         /* Check that the device supports requested interrupts */
1442         if ((dev_conf->intr_conf.lsc == 1) &&
1443                         (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
1444                 RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
1445                         dev->device->driver->name);
1446                 ret = -EINVAL;
1447                 goto rollback;
1448         }
1449         if ((dev_conf->intr_conf.rmv == 1) &&
1450                         (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
1451                 RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
1452                         dev->device->driver->name);
1453                 ret = -EINVAL;
1454                 goto rollback;
1455         }
1456
1457         /*
1458          * If jumbo frames are enabled, check that the maximum RX packet
1459          * length is supported by the configured device.
1460          */
1461         if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
1462                 if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) {
1463                         RTE_ETHDEV_LOG(ERR,
1464                                 "Ethdev port_id=%u max_rx_pkt_len %u > max valid value %u\n",
1465                                 port_id, dev_conf->rxmode.max_rx_pkt_len,
1466                                 dev_info.max_rx_pktlen);
1467                         ret = -EINVAL;
1468                         goto rollback;
1469                 } else if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN) {
1470                         RTE_ETHDEV_LOG(ERR,
1471                                 "Ethdev port_id=%u max_rx_pkt_len %u < min valid value %u\n",
1472                                 port_id, dev_conf->rxmode.max_rx_pkt_len,
1473                                 (unsigned int)RTE_ETHER_MIN_LEN);
1474                         ret = -EINVAL;
1475                         goto rollback;
1476                 }
1477
1478                 /* Scale the MTU size to adapt max_rx_pkt_len */
1479                 dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
1480                                 overhead_len;
1481         } else {
1482                 uint16_t pktlen = dev_conf->rxmode.max_rx_pkt_len;
1483                 if (pktlen < RTE_ETHER_MIN_MTU + overhead_len ||
1484                     pktlen > RTE_ETHER_MTU + overhead_len)
1485                         /* Use default value */
1486                         dev->data->dev_conf.rxmode.max_rx_pkt_len =
1487                                                 RTE_ETHER_MTU + overhead_len;
1488         }
1489
1490         /*
1491          * If LRO is enabled, check that the maximum aggregated packet
1492          * size is supported by the configured device.
1493          */
1494         if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
1495                 if (dev_conf->rxmode.max_lro_pkt_size == 0)
1496                         dev->data->dev_conf.rxmode.max_lro_pkt_size =
1497                                 dev->data->dev_conf.rxmode.max_rx_pkt_len;
1498                 ret = eth_dev_check_lro_pkt_size(port_id,
1499                                 dev->data->dev_conf.rxmode.max_lro_pkt_size,
1500                                 dev->data->dev_conf.rxmode.max_rx_pkt_len,
1501                                 dev_info.max_lro_pkt_size);
1502                 if (ret != 0)
1503                         goto rollback;
1504         }
1505
1506         /* Any requested offloading must be within its device capabilities */
1507         if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
1508              dev_conf->rxmode.offloads) {
1509                 RTE_ETHDEV_LOG(ERR,
1510                         "Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" don't match Rx offloads "
1511                         "capabilities 0x%"PRIx64" in %s()\n",
1512                         port_id, dev_conf->rxmode.offloads,
1513                         dev_info.rx_offload_capa,
1514                         __func__);
1515                 ret = -EINVAL;
1516                 goto rollback;
1517         }
1518         if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
1519              dev_conf->txmode.offloads) {
1520                 RTE_ETHDEV_LOG(ERR,
1521                         "Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" don't match Tx offloads "
1522                         "capabilities 0x%"PRIx64" in %s()\n",
1523                         port_id, dev_conf->txmode.offloads,
1524                         dev_info.tx_offload_capa,
1525                         __func__);
1526                 ret = -EINVAL;
1527                 goto rollback;
1528         }
1529
1530         dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
1531                 rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);
1532
1533         /* Check that the device supports the requested RSS hash functions. */
1534         if ((dev_info.flow_type_rss_offloads |
1535              dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
1536             dev_info.flow_type_rss_offloads) {
1537                 RTE_ETHDEV_LOG(ERR,
1538                         "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
1539                         port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
1540                         dev_info.flow_type_rss_offloads);
1541                 ret = -EINVAL;
1542                 goto rollback;
1543         }
1544
1545         /* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
1546         if (((dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) == 0) &&
1547             (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
1548                 RTE_ETHDEV_LOG(ERR,
1549                         "Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
1550                         port_id,
1551                         rte_eth_dev_rx_offload_name(DEV_RX_OFFLOAD_RSS_HASH));
1552                 ret = -EINVAL;
1553                 goto rollback;
1554         }
1555
1556         /*
1557          * Set up the new number of Rx/Tx queues and reconfigure the device.
1558          */
1559         diag = eth_dev_rx_queue_config(dev, nb_rx_q);
1560         if (diag != 0) {
1561                 RTE_ETHDEV_LOG(ERR,
1562                         "Port%u eth_dev_rx_queue_config = %d\n",
1563                         port_id, diag);
1564                 ret = diag;
1565                 goto rollback;
1566         }
1567
1568         diag = eth_dev_tx_queue_config(dev, nb_tx_q);
1569         if (diag != 0) {
1570                 RTE_ETHDEV_LOG(ERR,
1571                         "Port%u eth_dev_tx_queue_config = %d\n",
1572                         port_id, diag);
1573                 eth_dev_rx_queue_config(dev, 0);
1574                 ret = diag;
1575                 goto rollback;
1576         }
1577
1578         diag = (*dev->dev_ops->dev_configure)(dev);
1579         if (diag != 0) {
1580                 RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
1581                         port_id, diag);
1582                 ret = eth_err(port_id, diag);
1583                 goto reset_queues;
1584         }
1585
1586         /* Initialize Rx profiling if enabled at compilation time. */
1587         diag = __rte_eth_dev_profile_init(port_id, dev);
1588         if (diag != 0) {
1589                 RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
1590                         port_id, diag);
1591                 ret = eth_err(port_id, diag);
1592                 goto reset_queues;
1593         }
1594
1595         /* Validate Rx offloads. */
1596         diag = eth_dev_validate_offloads(port_id,
1597                         dev_conf->rxmode.offloads,
1598                         dev->data->dev_conf.rxmode.offloads, "Rx",
1599                         rte_eth_dev_rx_offload_name);
1600         if (diag != 0) {
1601                 ret = diag;
1602                 goto reset_queues;
1603         }
1604
1605         /* Validate Tx offloads. */
1606         diag = eth_dev_validate_offloads(port_id,
1607                         dev_conf->txmode.offloads,
1608                         dev->data->dev_conf.txmode.offloads, "Tx",
1609                         rte_eth_dev_tx_offload_name);
1610         if (diag != 0) {
1611                 ret = diag;
1612                 goto reset_queues;
1613         }
1614
1615         dev->data->dev_configured = 1;
1616         rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0);
1617         return 0;
1618 reset_queues:
1619         eth_dev_rx_queue_config(dev, 0);
1620         eth_dev_tx_queue_config(dev, 0);
1621 rollback:
1622         memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
1623         if (old_mtu != dev->data->mtu)
1624                 dev->data->mtu = old_mtu;
1625
1626         rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret);
1627         return ret;
1628 }
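
/*
 * Illustrative sketch, not part of the library: how an application is
 * expected to call rte_eth_dev_configure(), relying on the rollback
 * behaviour above (on any error the original dev_conf and MTU are
 * restored, so the port can simply be reconfigured with corrected
 * parameters). The function name and the hypothetical
 * ETHDEV_DOC_EXAMPLES guard are assumptions; compiled out by default.
 */
#ifdef ETHDEV_DOC_EXAMPLES
static int
example_configure(uint16_t port_id)
{
        struct rte_eth_conf conf;
        int ret;

        memset(&conf, 0, sizeof(conf));
        conf.rxmode.mq_mode = ETH_MQ_RX_NONE;

        /* One Rx and one Tx queue; queue setup and start come later. */
        ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
        if (ret < 0) {
                /* The device keeps its previous configuration. */
                return ret;
        }
        return 0;
}
#endif /* ETHDEV_DOC_EXAMPLES */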
1629
1630 void
1631 rte_eth_dev_internal_reset(struct rte_eth_dev *dev)
1632 {
1633         if (dev->data->dev_started) {
1634                 RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
1635                         dev->data->port_id);
1636                 return;
1637         }
1638
1639         eth_dev_rx_queue_config(dev, 0);
1640         eth_dev_tx_queue_config(dev, 0);
1641
1642         memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
1643 }
1644
1645 static void
1646 eth_dev_mac_restore(struct rte_eth_dev *dev,
1647                         struct rte_eth_dev_info *dev_info)
1648 {
1649         struct rte_ether_addr *addr;
1650         uint16_t i;
1651         uint32_t pool = 0;
1652         uint64_t pool_mask;
1653
1654         /* replay MAC address configuration including default MAC */
1655         addr = &dev->data->mac_addrs[0];
1656         if (*dev->dev_ops->mac_addr_set != NULL)
1657                 (*dev->dev_ops->mac_addr_set)(dev, addr);
1658         else if (*dev->dev_ops->mac_addr_add != NULL)
1659                 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);
1660
1661         if (*dev->dev_ops->mac_addr_add != NULL) {
1662                 for (i = 1; i < dev_info->max_mac_addrs; i++) {
1663                         addr = &dev->data->mac_addrs[i];
1664
1665                         /* skip zero address */
1666                         if (rte_is_zero_ether_addr(addr))
1667                                 continue;
1668
1669                         pool = 0;
1670                         pool_mask = dev->data->mac_pool_sel[i];
1671
1672                         do {
1673                                 if (pool_mask & 1ULL)
1674                                         (*dev->dev_ops->mac_addr_add)(dev,
1675                                                 addr, i, pool);
1676                                 pool_mask >>= 1;
1677                                 pool++;
1678                         } while (pool_mask);
1679                 }
1680         }
1681 }
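
/*
 * Illustrative sketch, not part of the library: the replay loop above
 * mirrors what an application sets up at runtime with
 * rte_eth_dev_mac_addr_add(), which records the pool selection in
 * mac_pool_sel[] so it can be restored here. The function name and the
 * address value are assumptions; compiled out by default.
 */
#ifdef ETHDEV_DOC_EXAMPLES
static int
example_add_secondary_mac(uint16_t port_id)
{
        struct rte_ether_addr mac = {
                .addr_bytes = {0x02, 0x00, 0x00, 0x00, 0x00, 0x01}
        };

        /* Accept this unicast address on pool 0 of the port. */
        return rte_eth_dev_mac_addr_add(port_id, &mac, 0);
}
#endif /* ETHDEV_DOC_EXAMPLES */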
1682
1683 static int
1684 eth_dev_config_restore(struct rte_eth_dev *dev,
1685                 struct rte_eth_dev_info *dev_info, uint16_t port_id)
1686 {
1687         int ret;
1688
1689         if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
1690                 eth_dev_mac_restore(dev, dev_info);
1691
1692         /* replay promiscuous configuration */
1693         /*
1694          * Use the callback directly: the port_id check is not needed here
1695          * and the replay must happen even if the stored value is unchanged.
1696          */
1697         if (rte_eth_promiscuous_get(port_id) == 1 &&
1698             *dev->dev_ops->promiscuous_enable != NULL) {
1699                 ret = eth_err(port_id,
1700                               (*dev->dev_ops->promiscuous_enable)(dev));
1701                 if (ret != 0 && ret != -ENOTSUP) {
1702                         RTE_ETHDEV_LOG(ERR,
1703                                 "Failed to enable promiscuous mode for device (port %u): %s\n",
1704                                 port_id, rte_strerror(-ret));
1705                         return ret;
1706                 }
1707         } else if (rte_eth_promiscuous_get(port_id) == 0 &&
1708                    *dev->dev_ops->promiscuous_disable != NULL) {
1709                 ret = eth_err(port_id,
1710                               (*dev->dev_ops->promiscuous_disable)(dev));
1711                 if (ret != 0 && ret != -ENOTSUP) {
1712                         RTE_ETHDEV_LOG(ERR,
1713                                 "Failed to disable promiscuous mode for device (port %u): %s\n",
1714                                 port_id, rte_strerror(-ret));
1715                         return ret;
1716                 }
1717         }
1718
1719         /* replay all multicast configuration */
1720         /*
1721          * Use the callback directly: the port_id check is not needed here
1722          * and the replay must happen even if the stored value is unchanged.
1723          */
1724         if (rte_eth_allmulticast_get(port_id) == 1 &&
1725             *dev->dev_ops->allmulticast_enable != NULL) {
1726                 ret = eth_err(port_id,
1727                               (*dev->dev_ops->allmulticast_enable)(dev));
1728                 if (ret != 0 && ret != -ENOTSUP) {
1729                         RTE_ETHDEV_LOG(ERR,
1730                                 "Failed to enable allmulticast mode for device (port %u): %s\n",
1731                                 port_id, rte_strerror(-ret));
1732                         return ret;
1733                 }
1734         } else if (rte_eth_allmulticast_get(port_id) == 0 &&
1735                    *dev->dev_ops->allmulticast_disable != NULL) {
1736                 ret = eth_err(port_id,
1737                               (*dev->dev_ops->allmulticast_disable)(dev));
1738                 if (ret != 0 && ret != -ENOTSUP) {
1739                         RTE_ETHDEV_LOG(ERR,
1740                                 "Failed to disable allmulticast mode for device (port %u): %s\n",
1741                                 port_id, rte_strerror(-ret));
1742                         return ret;
1743                 }
1744         }
1745
1746         return 0;
1747 }
1748
1749 int
1750 rte_eth_dev_start(uint16_t port_id)
1751 {
1752         struct rte_eth_dev *dev;
1753         struct rte_eth_dev_info dev_info;
1754         int diag;
1755         int ret, ret_stop;
1756
1757         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1758         dev = &rte_eth_devices[port_id];
1759
1760         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1761
1762         if (dev->data->dev_configured == 0) {
1763                 RTE_ETHDEV_LOG(INFO,
1764                         "Device with port_id=%"PRIu16" is not configured.\n",
1765                         port_id);
1766                 return -EINVAL;
1767         }
1768
1769         if (dev->data->dev_started != 0) {
1770                 RTE_ETHDEV_LOG(INFO,
1771                         "Device with port_id=%"PRIu16" already started\n",
1772                         port_id);
1773                 return 0;
1774         }
1775
1776         ret = rte_eth_dev_info_get(port_id, &dev_info);
1777         if (ret != 0)
1778                 return ret;
1779
1780         /* Restore the MAC address now if the device does not support live change */
1781         if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
1782                 eth_dev_mac_restore(dev, &dev_info);
1783
1784         diag = (*dev->dev_ops->dev_start)(dev);
1785         if (diag == 0)
1786                 dev->data->dev_started = 1;
1787         else
1788                 return eth_err(port_id, diag);
1789
1790         ret = eth_dev_config_restore(dev, &dev_info, port_id);
1791         if (ret != 0) {
1792                 RTE_ETHDEV_LOG(ERR,
1793                         "Error during restoring configuration for device (port %u): %s\n",
1794                         port_id, rte_strerror(-ret));
1795                 ret_stop = rte_eth_dev_stop(port_id);
1796                 if (ret_stop != 0) {
1797                         RTE_ETHDEV_LOG(ERR,
1798                                 "Failed to stop device (port %u): %s\n",
1799                                 port_id, rte_strerror(-ret_stop));
1800                 }
1801
1802                 return ret;
1803         }
1804
1805         if (dev->data->dev_conf.intr_conf.lsc == 0) {
1806                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
1807                 (*dev->dev_ops->link_update)(dev, 0);
1808         }
1809
1810         rte_ethdev_trace_start(port_id);
1811         return 0;
1812 }
1813
1814 int
1815 rte_eth_dev_stop(uint16_t port_id)
1816 {
1817         struct rte_eth_dev *dev;
1818         int ret;
1819
1820         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1821         dev = &rte_eth_devices[port_id];
1822
1823         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_stop, -ENOTSUP);
1824
1825         if (dev->data->dev_started == 0) {
1826                 RTE_ETHDEV_LOG(INFO,
1827                         "Device with port_id=%"PRIu16" already stopped\n",
1828                         port_id);
1829                 return 0;
1830         }
1831
1832         dev->data->dev_started = 0;
1833         ret = (*dev->dev_ops->dev_stop)(dev);
1834         rte_ethdev_trace_stop(port_id, ret);
1835
1836         return ret;
1837 }
1838
1839 int
1840 rte_eth_dev_set_link_up(uint16_t port_id)
1841 {
1842         struct rte_eth_dev *dev;
1843
1844         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1845         dev = &rte_eth_devices[port_id];
1846
1847         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1848         return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
1849 }
1850
1851 int
1852 rte_eth_dev_set_link_down(uint16_t port_id)
1853 {
1854         struct rte_eth_dev *dev;
1855
1856         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1857         dev = &rte_eth_devices[port_id];
1858
1859         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1860         return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
1861 }
1862
1863 int
1864 rte_eth_dev_close(uint16_t port_id)
1865 {
1866         struct rte_eth_dev *dev;
1867         int firsterr, binerr;
1868         int *lasterr = &firsterr;
1869
1870         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1871         dev = &rte_eth_devices[port_id];
1872
1873         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
1874         *lasterr = (*dev->dev_ops->dev_close)(dev);
1875         if (*lasterr != 0)
1876                 lasterr = &binerr;
1877
1878         rte_ethdev_trace_close(port_id);
1879         *lasterr = rte_eth_dev_release_port(dev);
1880
1881         return firsterr;
1882 }
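
/*
 * Illustrative sketch, not part of the library: the expected port
 * lifecycle around the functions above. Note that rte_eth_dev_start()
 * already stops the port itself when restoring the configuration
 * fails, and rte_eth_dev_close() releases the port even if the driver
 * close callback reported an error. The function name is an
 * assumption; compiled out by default.
 */
#ifdef ETHDEV_DOC_EXAMPLES
static int
example_lifecycle(uint16_t port_id)
{
        int ret;

        ret = rte_eth_dev_start(port_id);
        if (ret < 0)
                return ret;

        /* ... Rx/Tx runs here ... */

        ret = rte_eth_dev_stop(port_id);
        if (ret < 0)
                return ret;
        return rte_eth_dev_close(port_id);
}
#endif /* ETHDEV_DOC_EXAMPLES */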
1883
1884 int
1885 rte_eth_dev_reset(uint16_t port_id)
1886 {
1887         struct rte_eth_dev *dev;
1888         int ret;
1889
1890         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1891         dev = &rte_eth_devices[port_id];
1892
1893         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
1894
1895         ret = rte_eth_dev_stop(port_id);
1896         if (ret != 0) {
1897                 RTE_ETHDEV_LOG(ERR,
1898                         "Failed to stop device (port %u) before reset: %s, ignoring\n",
1899                         port_id, rte_strerror(-ret));
1900         }
1901         ret = dev->dev_ops->dev_reset(dev);
1902
1903         return eth_err(port_id, ret);
1904 }
1905
1906 int
1907 rte_eth_dev_is_removed(uint16_t port_id)
1908 {
1909         struct rte_eth_dev *dev;
1910         int ret;
1911
1912         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
1913         dev = &rte_eth_devices[port_id];
1914
1915         if (dev->state == RTE_ETH_DEV_REMOVED)
1916                 return 1;
1917
1918         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);
1919
1920         ret = dev->dev_ops->is_removed(dev);
1921         if (ret != 0)
1922                 /* Device is physically removed. */
1923                 dev->state = RTE_ETH_DEV_REMOVED;
1924
1925         return ret;
1926 }
1927
1928 static int
1929 rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg,
1930                              uint16_t n_seg, uint32_t *mbp_buf_size,
1931                              const struct rte_eth_dev_info *dev_info)
1932 {
1933         const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa;
1934         struct rte_mempool *mp_first;
1935         uint32_t offset_mask;
1936         uint16_t seg_idx;
1937
1938         if (n_seg > seg_capa->max_nseg) {
1939                 RTE_ETHDEV_LOG(ERR,
1940                                "Requested Rx segments %u exceed supported %u\n",
1941                                n_seg, seg_capa->max_nseg);
1942                 return -EINVAL;
1943         }
1944         /*
1945          * Check the sizes and offsets against buffer sizes
1946          * for each segment specified in extended configuration.
1947          */
1948         mp_first = rx_seg[0].mp;
1949         offset_mask = (1u << seg_capa->offset_align_log2) - 1;
1950         for (seg_idx = 0; seg_idx < n_seg; seg_idx++) {
1951                 struct rte_mempool *mpl = rx_seg[seg_idx].mp;
1952                 uint32_t length = rx_seg[seg_idx].length;
1953                 uint32_t offset = rx_seg[seg_idx].offset;
1954
1955                 if (mpl == NULL) {
1956                         RTE_ETHDEV_LOG(ERR, "null mempool pointer\n");
1957                         return -EINVAL;
1958                 }
1959                 if (seg_idx != 0 && mp_first != mpl &&
1960                     seg_capa->multi_pools == 0) {
1961                         RTE_ETHDEV_LOG(ERR, "Receiving to multiple pools is not supported\n");
1962                         return -ENOTSUP;
1963                 }
1964                 if (offset != 0) {
1965                         if (seg_capa->offset_allowed == 0) {
1966                                 RTE_ETHDEV_LOG(ERR, "Rx segmentation with offset is not supported\n");
1967                                 return -ENOTSUP;
1968                         }
1969                         if (offset & offset_mask) {
1970                                 RTE_ETHDEV_LOG(ERR, "Rx segmentation invalid offset alignment %u, %u\n",
1971                                                offset,
1972                                                seg_capa->offset_align_log2);
1973                                 return -EINVAL;
1974                         }
1975                 }
1976                 if (mpl->private_data_size <
1977                         sizeof(struct rte_pktmbuf_pool_private)) {
1978                         RTE_ETHDEV_LOG(ERR,
1979                                        "%s private_data_size %u < %u\n",
1980                                        mpl->name, mpl->private_data_size,
1981                                        (unsigned int)
1982                                        sizeof(struct rte_pktmbuf_pool_private));
1983                         return -ENOSPC;
1984                 }
1985                 offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM;
1986                 *mbp_buf_size = rte_pktmbuf_data_room_size(mpl);
1987                 length = length != 0 ? length : *mbp_buf_size;
1988                 if (*mbp_buf_size < length + offset) {
1989                         RTE_ETHDEV_LOG(ERR,
1990                                        "%s mbuf_data_room_size %u < %u (segment length=%u + segment offset=%u)\n",
1991                                        mpl->name, *mbp_buf_size,
1992                                        length + offset, length, offset);
1993                         return -EINVAL;
1994                 }
1995         }
1996         return 0;
1997 }
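
/*
 * Illustrative sketch, not part of the library: building the split
 * configuration validated above. Headers land in the first
 * (small-buffer) pool, payload in the second. The pool pointers, the
 * 128-byte header length and the function name are assumptions;
 * compiled out by default.
 */
#ifdef ETHDEV_DOC_EXAMPLES
static int
example_rx_split_setup(uint16_t port_id, uint16_t queue_id,
                       struct rte_mempool *hdr_pool,
                       struct rte_mempool *pay_pool)
{
        struct rte_eth_rxseg_split segs[2] = {
                { .mp = hdr_pool, .length = 128, .offset = 0 },
                { .mp = pay_pool, .length = 0 /* rest of the packet */ },
        };
        struct rte_eth_rxconf rxconf = {
                .offloads = RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT,
                .rx_seg = (union rte_eth_rxseg *)segs,
                .rx_nseg = RTE_DIM(segs),
        };

        /* mp == NULL selects the extended, multi-segment path above. */
        return rte_eth_rx_queue_setup(port_id, queue_id, 0,
                                      SOCKET_ID_ANY, &rxconf, NULL);
}
#endif /* ETHDEV_DOC_EXAMPLES */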
1998
1999 int
2000 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2001                        uint16_t nb_rx_desc, unsigned int socket_id,
2002                        const struct rte_eth_rxconf *rx_conf,
2003                        struct rte_mempool *mp)
2004 {
2005         int ret;
2006         uint32_t mbp_buf_size;
2007         struct rte_eth_dev *dev;
2008         struct rte_eth_dev_info dev_info;
2009         struct rte_eth_rxconf local_conf;
2010         void **rxq;
2011
2012         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2013         dev = &rte_eth_devices[port_id];
2014
2015         if (rx_queue_id >= dev->data->nb_rx_queues) {
2016                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
2017                 return -EINVAL;
2018         }
2019
2020         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
2021
2022         ret = rte_eth_dev_info_get(port_id, &dev_info);
2023         if (ret != 0)
2024                 return ret;
2025
2026         if (mp != NULL) {
2027                 /* Single pool configuration check. */
2028                 if (rx_conf != NULL && rx_conf->rx_nseg != 0) {
2029                         RTE_ETHDEV_LOG(ERR,
2030                                        "Ambiguous segment configuration\n");
2031                         return -EINVAL;
2032                 }
2033                 /*
2034                  * Check the size of the mbuf data buffer: this value
2035                  * must be provided in the private data of the memory pool.
2036                  * First check that the memory pool has valid private data.
2037                  */
2038                 if (mp->private_data_size <
2039                                 sizeof(struct rte_pktmbuf_pool_private)) {
2040                         RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n",
2041                                 mp->name, mp->private_data_size,
2042                                 (unsigned int)
2043                                 sizeof(struct rte_pktmbuf_pool_private));
2044                         return -ENOSPC;
2045                 }
2046                 mbp_buf_size = rte_pktmbuf_data_room_size(mp);
2047                 if (mbp_buf_size < dev_info.min_rx_bufsize +
2048                                    RTE_PKTMBUF_HEADROOM) {
2049                         RTE_ETHDEV_LOG(ERR,
2050                                        "%s mbuf_data_room_size %u < %u (RTE_PKTMBUF_HEADROOM=%u + min_rx_bufsize(dev)=%u)\n",
2051                                        mp->name, mbp_buf_size,
2052                                        RTE_PKTMBUF_HEADROOM +
2053                                        dev_info.min_rx_bufsize,
2054                                        RTE_PKTMBUF_HEADROOM,
2055                                        dev_info.min_rx_bufsize);
2056                         return -EINVAL;
2057                 }
2058         } else {
2059                 const struct rte_eth_rxseg_split *rx_seg;
2060                 uint16_t n_seg;
2061
2062                 /* Extended multi-segment configuration check. */
2063                 if (rx_conf == NULL || rx_conf->rx_seg == NULL || rx_conf->rx_nseg == 0) {
2064                         RTE_ETHDEV_LOG(ERR,
2065                                        "Memory pool is null and no extended configuration provided\n");
2066                         return -EINVAL;
2067                 }
2068
2069                 rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg;
2070                 n_seg = rx_conf->rx_nseg;
2071
2072                 if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
2073                         ret = rte_eth_rx_queue_check_split(rx_seg, n_seg,
2074                                                            &mbp_buf_size,
2075                                                            &dev_info);
2076                         if (ret != 0)
2077                                 return ret;
2078                 } else {
2079                         RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n");
2080                         return -EINVAL;
2081                 }
2082         }
2083
2084         /* Use default specified by driver, if nb_rx_desc is zero */
2085         if (nb_rx_desc == 0) {
2086                 nb_rx_desc = dev_info.default_rxportconf.ring_size;
2087                 /* If driver default is also zero, fall back on EAL default */
2088                 if (nb_rx_desc == 0)
2089                         nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
2090         }
2091
2092         if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
2093                         nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
2094                         nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
2095
2096                 RTE_ETHDEV_LOG(ERR,
2097                         "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
2098                         nb_rx_desc, dev_info.rx_desc_lim.nb_max,
2099                         dev_info.rx_desc_lim.nb_min,
2100                         dev_info.rx_desc_lim.nb_align);
2101                 return -EINVAL;
2102         }
2103
2104         if (dev->data->dev_started &&
2105                 !(dev_info.dev_capa &
2106                         RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
2107                 return -EBUSY;
2108
2109         if (dev->data->dev_started &&
2110                 (dev->data->rx_queue_state[rx_queue_id] !=
2111                         RTE_ETH_QUEUE_STATE_STOPPED))
2112                 return -EBUSY;
2113
2114         rxq = dev->data->rx_queues;
2115         if (rxq[rx_queue_id]) {
2116                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
2117                                         -ENOTSUP);
2118                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
2119                 rxq[rx_queue_id] = NULL;
2120         }
2121
2122         if (rx_conf == NULL)
2123                 rx_conf = &dev_info.default_rxconf;
2124
2125         local_conf = *rx_conf;
2126
2127         /*
2128          * If an offload has already been enabled in
2129          * rte_eth_dev_configure(), it has been enabled on all queues,
2130          * so there is no need to enable it on this queue again.
2131          * The local_conf.offloads input to the underlying PMD only
2132          * carries those offloads which are enabled on this queue
2133          * alone and not enabled on all queues.
2134          */
2135         local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
2136
2137         /*
2138          * Offloads newly added for this queue are those not enabled in
2139          * rte_eth_dev_configure() and they must be of the per-queue type.
2140          * A pure per-port offload can't be enabled on a queue while
2141          * disabled on another queue. A pure per-port offload can't
2142          * be newly added for any queue if it hasn't already been
2143          * enabled in rte_eth_dev_configure().
2144          */
2145         if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
2146              local_conf.offloads) {
2147                 RTE_ETHDEV_LOG(ERR,
2148                         "Ethdev port_id=%d rx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
2149                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2150                         port_id, rx_queue_id, local_conf.offloads,
2151                         dev_info.rx_queue_offload_capa,
2152                         __func__);
2153                 return -EINVAL;
2154         }
2155
2156         /*
2157          * If LRO is enabled, check that the maximum aggregated packet
2158          * size is supported by the configured device.
2159          */
2160         if (local_conf.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
2161                 if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0)
2162                         dev->data->dev_conf.rxmode.max_lro_pkt_size =
2163                                 dev->data->dev_conf.rxmode.max_rx_pkt_len;
2164                 int ret = eth_dev_check_lro_pkt_size(port_id,
2165                                 dev->data->dev_conf.rxmode.max_lro_pkt_size,
2166                                 dev->data->dev_conf.rxmode.max_rx_pkt_len,
2167                                 dev_info.max_lro_pkt_size);
2168                 if (ret != 0)
2169                         return ret;
2170         }
2171
2172         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
2173                                               socket_id, &local_conf, mp);
2174         if (!ret) {
2175                 if (!dev->data->min_rx_buf_size ||
2176                     dev->data->min_rx_buf_size > mbp_buf_size)
2177                         dev->data->min_rx_buf_size = mbp_buf_size;
2178         }
2179
2180         rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp,
2181                 rx_conf, ret);
2182         return eth_err(port_id, ret);
2183 }
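
/*
 * Illustrative sketch, not part of the library: requesting a per-queue
 * offload that was not enabled port-wide in rte_eth_dev_configure().
 * Per the logic above, local_conf then carries only this queue-specific
 * addition down to the PMD, and it must be within
 * dev_info.rx_queue_offload_capa. The offload chosen and the function
 * name are assumptions; compiled out by default.
 */
#ifdef ETHDEV_DOC_EXAMPLES
static int
example_rx_queue_with_offload(uint16_t port_id, uint16_t queue_id,
                              struct rte_mempool *mb_pool)
{
        struct rte_eth_dev_info dev_info;
        struct rte_eth_rxconf rxconf;
        int ret;

        ret = rte_eth_dev_info_get(port_id, &dev_info);
        if (ret != 0)
                return ret;

        rxconf = dev_info.default_rxconf;
        /* Enable scattered Rx on this queue only. */
        rxconf.offloads |= DEV_RX_OFFLOAD_SCATTER;

        /* nb_rx_desc == 0 selects the driver default ring size, as above. */
        return rte_eth_rx_queue_setup(port_id, queue_id, 0,
                                      SOCKET_ID_ANY, &rxconf, mb_pool);
}
#endif /* ETHDEV_DOC_EXAMPLES */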
2184
2185 int
2186 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2187                                uint16_t nb_rx_desc,
2188                                const struct rte_eth_hairpin_conf *conf)
2189 {
2190         int ret;
2191         struct rte_eth_dev *dev;
2192         struct rte_eth_hairpin_cap cap;
2193         void **rxq;
2194         int i;
2195         int count;
2196
2197         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2198         dev = &rte_eth_devices[port_id];
2199
2200         if (rx_queue_id >= dev->data->nb_rx_queues) {
2201                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
2202                 return -EINVAL;
2203         }
2204
2205         if (conf == NULL) {
2206                 RTE_ETHDEV_LOG(ERR,
2207                         "Cannot setup ethdev port %u Rx hairpin queue from NULL config\n",
2208                         port_id);
2209                 return -EINVAL;
2210         }
2211
2212         ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2213         if (ret != 0)
2214                 return ret;
2215         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_hairpin_queue_setup,
2216                                 -ENOTSUP);
2217         /* If nb_rx_desc is zero, use the max number of descriptors from the driver. */
2218         if (nb_rx_desc == 0)
2219                 nb_rx_desc = cap.max_nb_desc;
2220         if (nb_rx_desc > cap.max_nb_desc) {
2221                 RTE_ETHDEV_LOG(ERR,
2222                         "Invalid value for nb_rx_desc(=%hu), should be: <= %hu\n",
2223                         nb_rx_desc, cap.max_nb_desc);
2224                 return -EINVAL;
2225         }
2226         if (conf->peer_count > cap.max_rx_2_tx) {
2227                 RTE_ETHDEV_LOG(ERR,
2228                         "Invalid value for number of peers for Rx queue(=%u), should be: <= %hu\n",
2229                         conf->peer_count, cap.max_rx_2_tx);
2230                 return -EINVAL;
2231         }
2232         if (conf->peer_count == 0) {
2233                 RTE_ETHDEV_LOG(ERR,
2234                         "Invalid value for number of peers for Rx queue(=%u), should be: > 0\n",
2235                         conf->peer_count);
2236                 return -EINVAL;
2237         }
2238         for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
2239              cap.max_nb_queues != UINT16_MAX; i++) {
2240                 if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
2241                         count++;
2242         }
2243         if (count > cap.max_nb_queues) {
2244                 RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues, max is %d\n",
2245                                cap.max_nb_queues);
2246                 return -EINVAL;
2247         }
2248         if (dev->data->dev_started)
2249                 return -EBUSY;
2250         rxq = dev->data->rx_queues;
2251         if (rxq[rx_queue_id] != NULL) {
2252                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
2253                                         -ENOTSUP);
2254                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
2255                 rxq[rx_queue_id] = NULL;
2256         }
2257         ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
2258                                                       nb_rx_desc, conf);
2259         if (ret == 0)
2260                 dev->data->rx_queue_state[rx_queue_id] =
2261                         RTE_ETH_QUEUE_STATE_HAIRPIN;
2262         return eth_err(port_id, ret);
2263 }
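
/*
 * Illustrative sketch, not part of the library: a single-port hairpin
 * Rx queue peered with Tx queue 1 of the same port. The queue numbers
 * and the function name are assumptions; compiled out by default.
 */
#ifdef ETHDEV_DOC_EXAMPLES
static int
example_rx_hairpin(uint16_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_hairpin_conf conf = {
                .peer_count = 1,
                .peers[0] = { .port = port_id, .queue = 1 },
        };

        /* nb_rx_desc == 0 selects the driver maximum, as above. */
        return rte_eth_rx_hairpin_queue_setup(port_id, rx_queue_id, 0, &conf);
}
#endif /* ETHDEV_DOC_EXAMPLES */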
2264
2265 int
2266 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2267                        uint16_t nb_tx_desc, unsigned int socket_id,
2268                        const struct rte_eth_txconf *tx_conf)
2269 {
2270         struct rte_eth_dev *dev;
2271         struct rte_eth_dev_info dev_info;
2272         struct rte_eth_txconf local_conf;
2273         void **txq;
2274         int ret;
2275
2276         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2277         dev = &rte_eth_devices[port_id];
2278
2279         if (tx_queue_id >= dev->data->nb_tx_queues) {
2280                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
2281                 return -EINVAL;
2282         }
2283
2284         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
2285
2286         ret = rte_eth_dev_info_get(port_id, &dev_info);
2287         if (ret != 0)
2288                 return ret;
2289
2290         /* Use default specified by driver, if nb_tx_desc is zero */
2291         if (nb_tx_desc == 0) {
2292                 nb_tx_desc = dev_info.default_txportconf.ring_size;
2293                 /* If driver default is zero, fall back on EAL default */
2294                 if (nb_tx_desc == 0)
2295                         nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
2296         }
2297         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
2298             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
2299             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
2300                 RTE_ETHDEV_LOG(ERR,
2301                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
2302                         nb_tx_desc, dev_info.tx_desc_lim.nb_max,
2303                         dev_info.tx_desc_lim.nb_min,
2304                         dev_info.tx_desc_lim.nb_align);
2305                 return -EINVAL;
2306         }
2307
2308         if (dev->data->dev_started &&
2309                 !(dev_info.dev_capa &
2310                         RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
2311                 return -EBUSY;
2312
2313         if (dev->data->dev_started &&
2314                 (dev->data->tx_queue_state[tx_queue_id] !=
2315                         RTE_ETH_QUEUE_STATE_STOPPED))
2316                 return -EBUSY;
2317
2318         txq = dev->data->tx_queues;
2319         if (txq[tx_queue_id]) {
2320                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
2321                                         -ENOTSUP);
2322                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
2323                 txq[tx_queue_id] = NULL;
2324         }
2325
2326         if (tx_conf == NULL)
2327                 tx_conf = &dev_info.default_txconf;
2328
2329         local_conf = *tx_conf;
2330
2331         /*
2332          * If an offload has already been enabled in
2333          * rte_eth_dev_configure(), it has been enabled on all queues,
2334          * so there is no need to enable it on this queue again.
2335          * The local_conf.offloads input to the underlying PMD only
2336          * carries those offloads which are enabled on this queue
2337          * alone and not enabled on all queues.
2338          */
2339         local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
2340
2341         /*
2342          * Offloads newly added for this queue are those not enabled in
2343          * rte_eth_dev_configure() and they must be of the per-queue type.
2344          * A pure per-port offload can't be enabled on a queue while
2345          * disabled on another queue. A pure per-port offload can't
2346          * be newly added for any queue if it hasn't already been
2347          * enabled in rte_eth_dev_configure().
2348          */
2349         if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
2350              local_conf.offloads) {
2351                 RTE_ETHDEV_LOG(ERR,
2352                         "Ethdev port_id=%d tx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
2353                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2354                         port_id, tx_queue_id, local_conf.offloads,
2355                         dev_info.tx_queue_offload_capa,
2356                         __func__);
2357                 return -EINVAL;
2358         }
2359
2360         rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf);
2361         return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
2362                        tx_queue_id, nb_tx_desc, socket_id, &local_conf));
2363 }
2364
2365 int
2366 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2367                                uint16_t nb_tx_desc,
2368                                const struct rte_eth_hairpin_conf *conf)
2369 {
2370         struct rte_eth_dev *dev;
2371         struct rte_eth_hairpin_cap cap;
2372         void **txq;
2373         int i;
2374         int count;
2375         int ret;
2376
2377         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2378         dev = &rte_eth_devices[port_id];
2379
2380         if (tx_queue_id >= dev->data->nb_tx_queues) {
2381                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
2382                 return -EINVAL;
2383         }
2384
2385         if (conf == NULL) {
2386                 RTE_ETHDEV_LOG(ERR,
2387                         "Cannot setup ethdev port %u Tx hairpin queue from NULL config\n",
2388                         port_id);
2389                 return -EINVAL;
2390         }
2391
2392         ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2393         if (ret != 0)
2394                 return ret;
2395         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup,
2396                                 -ENOTSUP);
2397         /* If nb_tx_desc is zero, use the max number of descriptors from the driver. */
2398         if (nb_tx_desc == 0)
2399                 nb_tx_desc = cap.max_nb_desc;
2400         if (nb_tx_desc > cap.max_nb_desc) {
2401                 RTE_ETHDEV_LOG(ERR,
2402                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu\n",
2403                         nb_tx_desc, cap.max_nb_desc);
2404                 return -EINVAL;
2405         }
2406         if (conf->peer_count > cap.max_tx_2_rx) {
2407                 RTE_ETHDEV_LOG(ERR,
2408                         "Invalid value for number of peers for Tx queue(=%u), should be: <= %hu\n",
2409                         conf->peer_count, cap.max_tx_2_rx);
2410                 return -EINVAL;
2411         }
2412         if (conf->peer_count == 0) {
2413                 RTE_ETHDEV_LOG(ERR,
2414                         "Invalid value for number of peers for Tx queue(=%u), should be: > 0\n",
2415                         conf->peer_count);
2416                 return -EINVAL;
2417         }
2418         for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
2419              cap.max_nb_queues != UINT16_MAX; i++) {
2420                 if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
2421                         count++;
2422         }
2423         if (count > cap.max_nb_queues) {
2424                 RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues, max is %d\n",
2425                                cap.max_nb_queues);
2426                 return -EINVAL;
2427         }
2428         if (dev->data->dev_started)
2429                 return -EBUSY;
2430         txq = dev->data->tx_queues;
2431         if (txq[tx_queue_id] != NULL) {
2432                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
2433                                         -ENOTSUP);
2434                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
2435                 txq[tx_queue_id] = NULL;
2436         }
2437         ret = (*dev->dev_ops->tx_hairpin_queue_setup)
2438                 (dev, tx_queue_id, nb_tx_desc, conf);
2439         if (ret == 0)
2440                 dev->data->tx_queue_state[tx_queue_id] =
2441                         RTE_ETH_QUEUE_STATE_HAIRPIN;
2442         return eth_err(port_id, ret);
2443 }
2444
2445 int
2446 rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
2447 {
2448         struct rte_eth_dev *dev;
2449         int ret;
2450
2451         RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2452         dev = &rte_eth_devices[tx_port];
2453
2454         if (dev->data->dev_started == 0) {
2455                 RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port);
2456                 return -EBUSY;
2457         }
2458
2459         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_bind, -ENOTSUP);
2460         ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port);
2461         if (ret != 0)
2462                 RTE_ETHDEV_LOG(ERR, "Failed to bind hairpin Tx %d"
2463                                " to Rx %d (%d - all ports)\n",
2464                                tx_port, rx_port, RTE_MAX_ETHPORTS);
2465
2466         return ret;
2467 }
2468
2469 int
2470 rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
2471 {
2472         struct rte_eth_dev *dev;
2473         int ret;
2474
2475         RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2476         dev = &rte_eth_devices[tx_port];
2477
2478         if (dev->data->dev_started == 0) {
2479                 RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port);
2480                 return -EBUSY;
2481         }
2482
2483         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_unbind, -ENOTSUP);
2484         ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port);
2485         if (ret != 0)
2486                 RTE_ETHDEV_LOG(ERR, "Failed to unbind hairpin Tx %d"
2487                                " from Rx %d (%d - all ports)\n",
2488                                tx_port, rx_port, RTE_MAX_ETHPORTS);
2489
2490         return ret;
2491 }
2492
2493 int
2494 rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
2495                                size_t len, uint32_t direction)
2496 {
2497         struct rte_eth_dev *dev;
2498         int ret;
2499
2500         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2501         dev = &rte_eth_devices[port_id];
2502
2503         if (peer_ports == NULL) {
2504                 RTE_ETHDEV_LOG(ERR,
2505                         "Cannot get ethdev port %u hairpin peer ports to NULL\n",
2506                         port_id);
2507                 return -EINVAL;
2508         }
2509
2510         if (len == 0) {
2511                 RTE_ETHDEV_LOG(ERR,
2512                         "Cannot get ethdev port %u hairpin peer ports to array with zero size\n",
2513                         port_id);
2514                 return -EINVAL;
2515         }
2516
2517         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_get_peer_ports,
2518                                 -ENOTSUP);
2519
2520         ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports,
2521                                                       len, direction);
2522         if (ret < 0)
2523                 RTE_ETHDEV_LOG(ERR, "Failed to get port %d hairpin peer %s ports\n",
2524                                port_id, direction ? "Rx" : "Tx");
2525
2526         return ret;
2527 }
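
/*
 * Illustrative sketch, not part of the library: manual binding of a
 * two-port hairpin once both ports are started, then querying the
 * peers (passing RTE_MAX_ETHPORTS as rx_port would bind all peer
 * ports). The function name is an assumption; compiled out by default.
 */
#ifdef ETHDEV_DOC_EXAMPLES
static int
example_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
{
        uint16_t peers[RTE_MAX_ETHPORTS];
        int ret;

        ret = rte_eth_hairpin_bind(tx_port, rx_port);
        if (ret != 0)
                return ret;

        /* direction != 0: list the peer Rx ports of tx_port. */
        ret = rte_eth_hairpin_get_peer_ports(tx_port, peers,
                                             RTE_DIM(peers), 1);
        return ret < 0 ? ret : 0;
}
#endif /* ETHDEV_DOC_EXAMPLES */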
2528
2529 void
2530 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
2531                 void *userdata __rte_unused)
2532 {
2533         rte_pktmbuf_free_bulk(pkts, unsent);
2534 }
2535
2536 void
2537 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
2538                 void *userdata)
2539 {
2540         uint64_t *count = userdata;
2541
2542         rte_pktmbuf_free_bulk(pkts, unsent);
2543         *count += unsent;
2544 }
2545
2546 int
2547 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
2548                 buffer_tx_error_fn cbfn, void *userdata)
2549 {
2550         if (buffer == NULL) {
2551                 RTE_ETHDEV_LOG(ERR,
2552                         "Cannot set Tx buffer error callback to NULL buffer\n");
2553                 return -EINVAL;
2554         }
2555
2556         buffer->error_callback = cbfn;
2557         buffer->error_userdata = userdata;
2558         return 0;
2559 }
2560
2561 int
2562 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
2563 {
2564         int ret = 0;
2565
2566         if (buffer == NULL) {
2567                 RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL buffer\n");
2568                 return -EINVAL;
2569         }
2570
2571         buffer->size = size;
2572         if (buffer->error_callback == NULL) {
2573                 ret = rte_eth_tx_buffer_set_err_callback(
2574                         buffer, rte_eth_tx_buffer_drop_callback, NULL);
2575         }
2576
2577         return ret;
2578 }
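
/*
 * Illustrative sketch, not part of the library: allocating and using a
 * Tx buffer with the count callback above to track dropped packets.
 * The 32-packet buffer size, the variable names and the function name
 * are assumptions; compiled out by default.
 */
#ifdef ETHDEV_DOC_EXAMPLES
static int
example_tx_buffering(uint16_t port_id, uint16_t queue_id,
                     struct rte_mbuf *pkt, uint64_t *drop_count)
{
        struct rte_eth_dev_tx_buffer *buffer;
        int ret;

        buffer = rte_zmalloc_socket("example_tx_buffer",
                        RTE_ETH_TX_BUFFER_SIZE(32), 0,
                        rte_eth_dev_socket_id(port_id));
        if (buffer == NULL)
                return -ENOMEM;

        ret = rte_eth_tx_buffer_init(buffer, 32);
        if (ret == 0)
                ret = rte_eth_tx_buffer_set_err_callback(buffer,
                                rte_eth_tx_buffer_count_callback, drop_count);
        if (ret != 0) {
                rte_free(buffer);
                return ret;
        }

        /* Buffered; actually sent once 32 accumulate or on flush. */
        rte_eth_tx_buffer(port_id, queue_id, buffer, pkt);
        rte_eth_tx_buffer_flush(port_id, queue_id, buffer);

        rte_free(buffer);
        return 0;
}
#endif /* ETHDEV_DOC_EXAMPLES */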
2579
2580 int
2581 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
2582 {
2583         struct rte_eth_dev *dev;
2584         int ret;
2585
2586         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2587         dev = &rte_eth_devices[port_id];
2588
2589         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
2590
2591         /* Call driver to free pending mbufs. */
2592         ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
2593                                                free_cnt);
2594         return eth_err(port_id, ret);
2595 }
2596
2597 int
2598 rte_eth_promiscuous_enable(uint16_t port_id)
2599 {
2600         struct rte_eth_dev *dev;
2601         int diag = 0;
2602
2603         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2604         dev = &rte_eth_devices[port_id];
2605
2606         if (dev->data->promiscuous == 1)
2607                 return 0;
2608
2609         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP);
2610
2611         diag = (*dev->dev_ops->promiscuous_enable)(dev);
2612         dev->data->promiscuous = (diag == 0) ? 1 : 0;
2613
2614         return eth_err(port_id, diag);
2615 }
2616
2617 int
2618 rte_eth_promiscuous_disable(uint16_t port_id)
2619 {
2620         struct rte_eth_dev *dev;
2621         int diag = 0;
2622
2623         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2624         dev = &rte_eth_devices[port_id];
2625
2626         if (dev->data->promiscuous == 0)
2627                 return 0;
2628
2629         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP);
2630
2631         dev->data->promiscuous = 0;
2632         diag = (*dev->dev_ops->promiscuous_disable)(dev);
2633         if (diag != 0)
2634                 dev->data->promiscuous = 1;
2635
2636         return eth_err(port_id, diag);
2637 }
2638
2639 int
2640 rte_eth_promiscuous_get(uint16_t port_id)
2641 {
2642         struct rte_eth_dev *dev;
2643
2644         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2645         dev = &rte_eth_devices[port_id];
2646
2647         return dev->data->promiscuous;
2648 }
2649
2650 int
2651 rte_eth_allmulticast_enable(uint16_t port_id)
2652 {
2653         struct rte_eth_dev *dev;
2654         int diag;
2655
2656         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2657         dev = &rte_eth_devices[port_id];
2658
2659         if (dev->data->all_multicast == 1)
2660                 return 0;
2661
2662         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP);
2663         diag = (*dev->dev_ops->allmulticast_enable)(dev);
2664         dev->data->all_multicast = (diag == 0) ? 1 : 0;
2665
2666         return eth_err(port_id, diag);
2667 }
2668
2669 int
2670 rte_eth_allmulticast_disable(uint16_t port_id)
2671 {
2672         struct rte_eth_dev *dev;
2673         int diag;
2674
2675         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2676         dev = &rte_eth_devices[port_id];
2677
2678         if (dev->data->all_multicast == 0)
2679                 return 0;
2680
2681         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP);
2682         dev->data->all_multicast = 0;
2683         diag = (*dev->dev_ops->allmulticast_disable)(dev);
2684         if (diag != 0)
2685                 dev->data->all_multicast = 1;
2686
2687         return eth_err(port_id, diag);
2688 }
2689
2690 int
2691 rte_eth_allmulticast_get(uint16_t port_id)
2692 {
2693         struct rte_eth_dev *dev;
2694
2695         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2696         dev = &rte_eth_devices[port_id];
2697
2698         return dev->data->all_multicast;
2699 }
2700
2701 int
2702 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
2703 {
2704         struct rte_eth_dev *dev;
2705
2706         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2707         dev = &rte_eth_devices[port_id];
2708
2709         if (eth_link == NULL) {
2710                 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n",
2711                         port_id);
2712                 return -EINVAL;
2713         }
2714
2715         if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started)
2716                 rte_eth_linkstatus_get(dev, eth_link);
2717         else {
2718                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2719                 (*dev->dev_ops->link_update)(dev, 1);
2720                 *eth_link = dev->data->dev_link;
2721         }
2722
2723         return 0;
2724 }
2725
2726 int
2727 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
2728 {
2729         struct rte_eth_dev *dev;
2730
2731         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2732         dev = &rte_eth_devices[port_id];
2733
2734         if (eth_link == NULL) {
2735                 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n",
2736                         port_id);
2737                 return -EINVAL;
2738         }
2739
2740         if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started)
2741                 rte_eth_linkstatus_get(dev, eth_link);
2742         else {
2743                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2744                 (*dev->dev_ops->link_update)(dev, 0);
2745                 *eth_link = dev->data->dev_link;
2746         }
2747
2748         return 0;
2749 }
2750
2751 const char *
2752 rte_eth_link_speed_to_str(uint32_t link_speed)
2753 {
2754         switch (link_speed) {
2755         case ETH_SPEED_NUM_NONE: return "None";
2756         case ETH_SPEED_NUM_10M:  return "10 Mbps";
2757         case ETH_SPEED_NUM_100M: return "100 Mbps";
2758         case ETH_SPEED_NUM_1G:   return "1 Gbps";
2759         case ETH_SPEED_NUM_2_5G: return "2.5 Gbps";
2760         case ETH_SPEED_NUM_5G:   return "5 Gbps";
2761         case ETH_SPEED_NUM_10G:  return "10 Gbps";
2762         case ETH_SPEED_NUM_20G:  return "20 Gbps";
2763         case ETH_SPEED_NUM_25G:  return "25 Gbps";
2764         case ETH_SPEED_NUM_40G:  return "40 Gbps";
2765         case ETH_SPEED_NUM_50G:  return "50 Gbps";
2766         case ETH_SPEED_NUM_56G:  return "56 Gbps";
2767         case ETH_SPEED_NUM_100G: return "100 Gbps";
2768         case ETH_SPEED_NUM_200G: return "200 Gbps";
2769         case ETH_SPEED_NUM_UNKNOWN: return "Unknown";
2770         default: return "Invalid";
2771         }
2772 }
2773
2774 int
2775 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
2776 {
2777         if (str == NULL) {
2778                 RTE_ETHDEV_LOG(ERR, "Cannot convert link to NULL string\n");
2779                 return -EINVAL;
2780         }
2781
2782         if (len == 0) {
2783                 RTE_ETHDEV_LOG(ERR,
2784                         "Cannot convert link to string with zero size\n");
2785                 return -EINVAL;
2786         }
2787
2788         if (eth_link == NULL) {
2789                 RTE_ETHDEV_LOG(ERR, "Cannot convert to string from NULL link\n");
2790                 return -EINVAL;
2791         }
2792
2793         if (eth_link->link_status == ETH_LINK_DOWN)
2794                 return snprintf(str, len, "Link down");
2795         else
2796                 return snprintf(str, len, "Link up at %s %s %s",
2797                         rte_eth_link_speed_to_str(eth_link->link_speed),
2798                         (eth_link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
2799                         "FDX" : "HDX",
2800                         (eth_link->link_autoneg == ETH_LINK_AUTONEG) ?
2801                         "Autoneg" : "Fixed");
2802 }
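
/*
 * Illustrative sketch, not part of the library: formatting the link
 * status with the two helpers above. The function name is an
 * assumption; compiled out by default.
 */
#ifdef ETHDEV_DOC_EXAMPLES
static int
example_log_link(uint16_t port_id)
{
        struct rte_eth_link link;
        char text[RTE_ETH_LINK_MAX_STR_LEN];
        int ret;

        ret = rte_eth_link_get_nowait(port_id, &link);
        if (ret != 0)
                return ret;

        rte_eth_link_to_str(text, sizeof(text), &link);
        RTE_ETHDEV_LOG(INFO, "Port %u: %s\n", port_id, text);
        return 0;
}
#endif /* ETHDEV_DOC_EXAMPLES */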
2803
2804 int
2805 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
2806 {
2807         struct rte_eth_dev *dev;
2808
2809         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2810         dev = &rte_eth_devices[port_id];
2811
2812         if (stats == NULL) {
2813                 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u stats to NULL\n",
2814                         port_id);
2815                 return -EINVAL;
2816         }
2817
2818         memset(stats, 0, sizeof(*stats));
2819
2820         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
2821         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
2822         return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
2823 }
2824
2825 int
2826 rte_eth_stats_reset(uint16_t port_id)
2827 {
2828         struct rte_eth_dev *dev;
2829         int ret;
2830
2831         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2832         dev = &rte_eth_devices[port_id];
2833
2834         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
2835         ret = (*dev->dev_ops->stats_reset)(dev);
2836         if (ret != 0)
2837                 return eth_err(port_id, ret);
2838
2839         dev->data->rx_mbuf_alloc_failed = 0;
2840
2841         return 0;
2842 }
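
/*
 * Illustrative sketch, not part of the library: sampling and clearing
 * the basic statistics with the functions above. The function name is
 * an assumption; compiled out by default.
 */
#ifdef ETHDEV_DOC_EXAMPLES
static int
example_sample_stats(uint16_t port_id, uint64_t *rx_pkts, uint64_t *rx_drops)
{
        struct rte_eth_stats stats;
        int ret;

        ret = rte_eth_stats_get(port_id, &stats);
        if (ret != 0)
                return ret;

        *rx_pkts = stats.ipackets;
        *rx_drops = stats.imissed + stats.rx_nombuf;

        /* Restart counting from zero (also clears rx_mbuf_alloc_failed). */
        return rte_eth_stats_reset(port_id);
}
#endif /* ETHDEV_DOC_EXAMPLES */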
2843
2844 static inline int
2845 eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev)
2846 {
2847         uint16_t nb_rxqs, nb_txqs;
2848         int count;
2849
2850         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2851         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2852
2853         count = RTE_NB_STATS;
2854         if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) {
2855                 count += nb_rxqs * RTE_NB_RXQ_STATS;
2856                 count += nb_txqs * RTE_NB_TXQ_STATS;
2857         }
2858
2859         return count;
2860 }
2861
2862 static int
2863 eth_dev_get_xstats_count(uint16_t port_id)
2864 {
2865         struct rte_eth_dev *dev;
2866         int count;
2867
2868         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2869         dev = &rte_eth_devices[port_id];
2870         if (dev->dev_ops->xstats_get_names_by_id != NULL) {
2871                 count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
2872                                 NULL, 0);
2873                 if (count < 0)
2874                         return eth_err(port_id, count);
2875         }
2876         if (dev->dev_ops->xstats_get_names != NULL) {
2877                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
2878                 if (count < 0)
2879                         return eth_err(port_id, count);
2880         } else
2881                 count = 0;
2882
2883
2884         count += eth_dev_get_xstats_basic_count(dev);
2885
2886         return count;
2887 }
2888
2889 int
2890 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
2891                 uint64_t *id)
2892 {
2893         int cnt_xstats, idx_xstat;
2894
2895         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2896
2897         if (xstat_name == NULL) {
2898                 RTE_ETHDEV_LOG(ERR,
2899                         "Cannot get ethdev port %u xstats ID from NULL xstat name\n",
2900                         port_id);
2901                 return -EINVAL;
2902         }
2903
2904         if (id == NULL) {
2905                 RTE_ETHDEV_LOG(ERR,
2906                         "Cannot get ethdev port %u xstats ID to NULL\n",
2907                         port_id);
2908                 return -EINVAL;
2909         }
2910
2911         /* Get count */
2912         cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
2913         if (cnt_xstats < 0) {
2914                 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n");
2915                 return -ENODEV;
2916         }
2917
2918         /* Get id-name lookup table */
2919         struct rte_eth_xstat_name xstats_names[cnt_xstats];
2920
2921         if (cnt_xstats != rte_eth_xstats_get_names_by_id(
2922                         port_id, xstats_names, cnt_xstats, NULL)) {
2923                 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n");
2924                 return -1;
2925         }
2926
2927         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
2928                 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
2929                         *id = idx_xstat;
2930                         return 0;
2931                 }
2932         }
2933
2934         return -EINVAL;
2935 }
2936
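/*
 * Illustrative usage sketch (not part of this file): resolve an xstat
 * by name once, then sample it cheaply by ID on every poll via
 * rte_eth_xstats_get_by_id(). "rx_good_packets" is one of the basic
 * stats names defined at the top of this file.
 *
 *	uint64_t id, value;
 *
 *	if (rte_eth_xstats_get_id_by_name(port_id, "rx_good_packets", &id) == 0 &&
 *	    rte_eth_xstats_get_by_id(port_id, &id, &value, 1) == 1)
 *		printf("rx_good_packets=%" PRIu64 "\n", value);
 */
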
2937 /* retrieve basic stats names */
2938 static int
2939 eth_basic_stats_get_names(struct rte_eth_dev *dev,
2940         struct rte_eth_xstat_name *xstats_names)
2941 {
2942         int cnt_used_entries = 0;
2943         uint32_t idx, id_queue;
2944         uint16_t num_q;
2945
2946         for (idx = 0; idx < RTE_NB_STATS; idx++) {
2947                 strlcpy(xstats_names[cnt_used_entries].name,
2948                         eth_dev_stats_strings[idx].name,
2949                         sizeof(xstats_names[0].name));
2950                 cnt_used_entries++;
2951         }
2952
2953         if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
2954                 return cnt_used_entries;
2955
2956         num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2957         for (id_queue = 0; id_queue < num_q; id_queue++) {
2958                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
2959                         snprintf(xstats_names[cnt_used_entries].name,
2960                                 sizeof(xstats_names[0].name),
2961                                 "rx_q%u_%s",
2962                                 id_queue, eth_dev_rxq_stats_strings[idx].name);
2963                         cnt_used_entries++;
2964                 }
2965
2966         }
2967         num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2968         for (id_queue = 0; id_queue < num_q; id_queue++) {
2969                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
2970                         snprintf(xstats_names[cnt_used_entries].name,
2971                                 sizeof(xstats_names[0].name),
2972                                 "tx_q%u_%s",
2973                                 id_queue, eth_dev_txq_stats_strings[idx].name);
2974                         cnt_used_entries++;
2975                 }
2976         }
2977         return cnt_used_entries;
2978 }
2979
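/*
 * For reference, the loops above generate per-queue names of the form
 * "rx_q<N>_<stat>" and "tx_q<N>_<stat>", e.g. "rx_q0_packets",
 * "rx_q0_bytes", "rx_q0_errors", "tx_q0_packets" and "tx_q0_bytes",
 * but only when the driver sets RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS.
 */
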
2980 /* retrieve ethdev extended statistics names */
2981 int
2982 rte_eth_xstats_get_names_by_id(uint16_t port_id,
2983         struct rte_eth_xstat_name *xstats_names, unsigned int size,
2984         uint64_t *ids)
2985 {
2986         struct rte_eth_xstat_name *xstats_names_copy;
2987         unsigned int no_basic_stat_requested = 1;
2988         unsigned int no_ext_stat_requested = 1;
2989         unsigned int expected_entries;
2990         unsigned int basic_count;
2991         struct rte_eth_dev *dev;
2992         unsigned int i;
2993         int ret;
2994
2995         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2996         dev = &rte_eth_devices[port_id];
2997
2998         basic_count = eth_dev_get_xstats_basic_count(dev);
2999         ret = eth_dev_get_xstats_count(port_id);
3000         if (ret < 0)
3001                 return ret;
3002         expected_entries = (unsigned int)ret;
3003
3004         /* Return max number of stats if no ids given */
3005         if (!ids) {
3006                 if (!xstats_names)
3007                         return expected_entries;
3008                 else if (size < expected_entries)
3009                         return expected_entries;
3010         }
3011
3012         if (ids && !xstats_names)
3013                 return -EINVAL;
3014
3015         if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
3016                 uint64_t ids_copy[size];
3017
3018                 for (i = 0; i < size; i++) {
3019                         if (ids[i] < basic_count) {
3020                                 no_basic_stat_requested = 0;
3021                                 break;
3022                         }
3023
3024                         /*
3025                          * Convert ids to xstats ids that PMD knows.
3026                          * ids known by user are basic + extended stats.
3027                          */
3028                         ids_copy[i] = ids[i] - basic_count;
3029                 }
3030
3031                 if (no_basic_stat_requested)
3032                         return (*dev->dev_ops->xstats_get_names_by_id)(dev,
3033                                         xstats_names, ids_copy, size);
3034         }
3035
3036         /* Retrieve all stats */
3037         if (!ids) {
3038                 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
3039                                 expected_entries);
3040                 if (num_stats < 0 || num_stats > (int)expected_entries)
3041                         return num_stats;
3042                 else
3043                         return expected_entries;
3044         }
3045
3046         xstats_names_copy = calloc(expected_entries,
3047                 sizeof(struct rte_eth_xstat_name));
3048
3049         if (!xstats_names_copy) {
3050                 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n");
3051                 return -ENOMEM;
3052         }
3053
3054         if (ids) {
3055                 for (i = 0; i < size; i++) {
3056                         if (ids[i] >= basic_count) {
3057                                 no_ext_stat_requested = 0;
3058                                 break;
3059                         }
3060                 }
3061         }
3062
3063         /* Fill xstats_names_copy structure */
3064         if (ids && no_ext_stat_requested) {
3065                 eth_basic_stats_get_names(dev, xstats_names_copy);
3066         } else {
3067                 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
3068                         expected_entries);
3069                 if (ret < 0) {
3070                         free(xstats_names_copy);
3071                         return ret;
3072                 }
3073         }
3074
3075         /* Filter stats */
3076         for (i = 0; i < size; i++) {
3077                 if (ids[i] >= expected_entries) {
3078                         RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
3079                         free(xstats_names_copy);
3080                         return -1;
3081                 }
3082                 xstats_names[i] = xstats_names_copy[ids[i]];
3083         }
3084
3085         free(xstats_names_copy);
3086         return size;
3087 }
3088
3089 int
3090 rte_eth_xstats_get_names(uint16_t port_id,
3091         struct rte_eth_xstat_name *xstats_names,
3092         unsigned int size)
3093 {
3094         struct rte_eth_dev *dev;
3095         int cnt_used_entries;
3096         int cnt_expected_entries;
3097         int cnt_driver_entries;
3098
3099         cnt_expected_entries = eth_dev_get_xstats_count(port_id);
3100         if (xstats_names == NULL || cnt_expected_entries < 0 ||
3101                         (int)size < cnt_expected_entries)
3102                 return cnt_expected_entries;
3103
3104         /* port_id checked in eth_dev_get_xstats_count() */
3105         dev = &rte_eth_devices[port_id];
3106
3107         cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names);
3108
3109         if (dev->dev_ops->xstats_get_names != NULL) {
3110                 /* If there are any driver-specific xstats, append them
3111                  * to end of list.
3112                  */
3113                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
3114                         dev,
3115                         xstats_names + cnt_used_entries,
3116                         size - cnt_used_entries);
3117                 if (cnt_driver_entries < 0)
3118                         return eth_err(port_id, cnt_driver_entries);
3119                 cnt_used_entries += cnt_driver_entries;
3120         }
3121
3122         return cnt_used_entries;
3123 }
3124
3126 static int
3127 eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
3128 {
3129         struct rte_eth_dev *dev;
3130         struct rte_eth_stats eth_stats;
3131         unsigned int count = 0, i, q;
3132         uint64_t val, *stats_ptr;
3133         uint16_t nb_rxqs, nb_txqs;
3134         int ret;
3135
3136         ret = rte_eth_stats_get(port_id, &eth_stats);
3137         if (ret < 0)
3138                 return ret;
3139
3140         dev = &rte_eth_devices[port_id];
3141
3142         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3143         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3144
3145         /* global stats */
3146         for (i = 0; i < RTE_NB_STATS; i++) {
3147                 stats_ptr = RTE_PTR_ADD(&eth_stats,
3148                                         eth_dev_stats_strings[i].offset);
3149                 val = *stats_ptr;
3150                 xstats[count++].value = val;
3151         }
3152
3153         if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
3154                 return count;
3155
3156         /* per-rxq stats */
3157         for (q = 0; q < nb_rxqs; q++) {
3158                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
3159                         stats_ptr = RTE_PTR_ADD(&eth_stats,
3160                                         eth_dev_rxq_stats_strings[i].offset +
3161                                         q * sizeof(uint64_t));
3162                         val = *stats_ptr;
3163                         xstats[count++].value = val;
3164                 }
3165         }
3166
3167         /* per-txq stats */
3168         for (q = 0; q < nb_txqs; q++) {
3169                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
3170                         stats_ptr = RTE_PTR_ADD(&eth_stats,
3171                                         eth_dev_txq_stats_strings[i].offset +
3172                                         q * sizeof(uint64_t));
3173                         val = *stats_ptr;
3174                         xstats[count++].value = val;
3175                 }
3176         }
3177         return count;
3178 }
3179
3180 /* retrieve ethdev extended statistics */
3181 int
3182 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
3183                          uint64_t *values, unsigned int size)
3184 {
3185         unsigned int no_basic_stat_requested = 1;
3186         unsigned int no_ext_stat_requested = 1;
3187         unsigned int num_xstats_filled;
3188         unsigned int basic_count;
3189         uint16_t expected_entries;
3190         struct rte_eth_dev *dev;
3191         unsigned int i;
3192         int ret;
3193
3194         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3195         dev = &rte_eth_devices[port_id];
3196
3197         ret = eth_dev_get_xstats_count(port_id);
3198         if (ret < 0)
3199                 return ret;
3200         expected_entries = (uint16_t)ret;
3201         struct rte_eth_xstat xstats[expected_entries];
3202         basic_count = eth_dev_get_xstats_basic_count(dev);
3203
3204         /* Return max number of stats if no ids given */
3205         if (!ids) {
3206                 if (!values)
3207                         return expected_entries;
3208                 else if (size < expected_entries)
3209                         return expected_entries;
3210         }
3211
3212         if (ids && !values)
3213                 return -EINVAL;
3214
3215         if (ids && dev->dev_ops->xstats_get_by_id != NULL && size > 0) {
3217                 uint64_t ids_copy[size];
3218
3219                 for (i = 0; i < size; i++) {
3220                         if (ids[i] < basic_count) {
3221                                 no_basic_stat_requested = 0;
3222                                 break;
3223                         }
3224
3225                         /*
3226                          * Convert ids to xstats ids that PMD knows.
3227                          * ids known by user are basic + extended stats.
3228                          */
3229                         ids_copy[i] = ids[i] - basic_count;
3230                 }
3231
3232                 if (no_basic_stat_requested)
3233                         return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
3234                                         values, size);
3235         }
3236
3237         if (ids) {
3238                 for (i = 0; i < size; i++) {
3239                         if (ids[i] >= basic_count) {
3240                                 no_ext_stat_requested = 0;
3241                                 break;
3242                         }
3243                 }
3244         }
3245
3246         /* Fill the xstats structure */
3247         if (ids && no_ext_stat_requested)
3248                 ret = eth_basic_stats_get(port_id, xstats);
3249         else
3250                 ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
3251
3252         if (ret < 0)
3253                 return ret;
3254         num_xstats_filled = (unsigned int)ret;
3255
3256         /* Return all stats */
3257         if (!ids) {
3258                 for (i = 0; i < num_xstats_filled; i++)
3259                         values[i] = xstats[i].value;
3260                 return expected_entries;
3261         }
3262
3263         /* Filter stats */
3264         for (i = 0; i < size; i++) {
3265                 if (ids[i] >= expected_entries) {
3266                         RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
3267                         return -1;
3268                 }
3269                 values[i] = xstats[ids[i]].value;
3270         }
3271         return size;
3272 }
3273
3274 int
3275 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
3276         unsigned int n)
3277 {
3278         struct rte_eth_dev *dev;
3279         unsigned int count = 0, i;
3280         signed int xcount = 0;
3281         uint16_t nb_rxqs, nb_txqs;
3282         int ret;
3283
3284         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3285         dev = &rte_eth_devices[port_id];
3286
3287         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3288         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3289
3290         /* Return generic statistics */
3291         count = RTE_NB_STATS;
3292         if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS)
3293                 count += (nb_rxqs * RTE_NB_RXQ_STATS) + (nb_txqs * RTE_NB_TXQ_STATS);
3294
3295         /* implemented by the driver */
3296         if (dev->dev_ops->xstats_get != NULL) {
3297                 /* Retrieve the xstats from the driver at the end of the
3298                  * xstats struct.
3299                  */
3300                 xcount = (*dev->dev_ops->xstats_get)(dev,
3301                                      xstats ? xstats + count : NULL,
3302                                      (n > count) ? n - count : 0);
3303
3304                 if (xcount < 0)
3305                         return eth_err(port_id, xcount);
3306         }
3307
3308         if (n < count + xcount || xstats == NULL)
3309                 return count + xcount;
3310
3311         /* now fill the xstats structure */
3312         ret = eth_basic_stats_get(port_id, xstats);
3313         if (ret < 0)
3314                 return ret;
3315         count = ret;
3316
3317         for (i = 0; i < count; i++)
3318                 xstats[i].id = i;
3319         /* add an offset to driver-specific stats */
3320         for ( ; i < count + xcount; i++)
3321                 xstats[i].id += count;
3322
3323         return count + xcount;
3324 }
3325
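/*
 * Illustrative usage sketch (not part of this file): the two-call
 * pattern for dumping all xstats with the functions above. A first call
 * with a NULL array returns the required count; error handling and
 * allocation-failure checks are abbreviated.
 *
 *	int i, n = rte_eth_xstats_get(port_id, NULL, 0);
 *	struct rte_eth_xstat *xs = malloc(n * sizeof(*xs));
 *	struct rte_eth_xstat_name *names = malloc(n * sizeof(*names));
 *
 *	if (rte_eth_xstats_get_names(port_id, names, n) == n &&
 *	    rte_eth_xstats_get(port_id, xs, n) == n)
 *		for (i = 0; i < n; i++)
 *			printf("%s: %" PRIu64 "\n",
 *			       names[xs[i].id].name, xs[i].value);
 *	free(names);
 *	free(xs);
 */
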
3326 /* reset ethdev extended statistics */
3327 int
3328 rte_eth_xstats_reset(uint16_t port_id)
3329 {
3330         struct rte_eth_dev *dev;
3331
3332         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3333         dev = &rte_eth_devices[port_id];
3334
3335         /* implemented by the driver */
3336         if (dev->dev_ops->xstats_reset != NULL)
3337                 return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev));
3338
3339         /* fallback to default */
3340         return rte_eth_stats_reset(port_id);
3341 }
3342
3343 static int
3344 eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id,
3345                 uint8_t stat_idx, uint8_t is_rx)
3346 {
3347         struct rte_eth_dev *dev;
3348
3349         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3350         dev = &rte_eth_devices[port_id];
3351
3352         if (is_rx && (queue_id >= dev->data->nb_rx_queues))
3353                 return -EINVAL;
3354
3355         if (!is_rx && (queue_id >= dev->data->nb_tx_queues))
3356                 return -EINVAL;
3357
3358         if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
3359                 return -EINVAL;
3360
3361         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
3362         return (*dev->dev_ops->queue_stats_mapping_set)(dev, queue_id, stat_idx, is_rx);
3363 }
3364
3365 int
3366 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
3367                 uint8_t stat_idx)
3368 {
3369         return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id,
3370                                                 tx_queue_id,
3371                                                 stat_idx, STAT_QMAP_TX));
3372 }
3373
3374 int
3375 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
3376                 uint8_t stat_idx)
3377 {
3378         return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id,
3379                                                 rx_queue_id,
3380                                                 stat_idx, STAT_QMAP_RX));
3381 }
3382
3383 int
3384 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
3385 {
3386         struct rte_eth_dev *dev;
3387
3388         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3389         dev = &rte_eth_devices[port_id];
3390
3391         if (fw_version == NULL && fw_size > 0) {
3392                 RTE_ETHDEV_LOG(ERR,
3393                         "Cannot get ethdev port %u FW version to NULL when string size is non zero\n",
3394                         port_id);
3395                 return -EINVAL;
3396         }
3397
3398         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
3399         return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
3400                                                         fw_version, fw_size));
3401 }
3402
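/*
 * Illustrative usage sketch (not part of this file): querying the
 * firmware version string. Per the API contract, a positive return
 * value means the buffer was too small and reports the size needed;
 * the 128-byte buffer is an arbitrary example size.
 *
 *	char fw_version[128];
 *	int ret = rte_eth_dev_fw_version_get(port_id, fw_version,
 *			sizeof(fw_version));
 *
 *	if (ret == 0)
 *		printf("firmware: %s\n", fw_version);
 */
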
3403 int
3404 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
3405 {
3406         struct rte_eth_dev *dev;
3407         const struct rte_eth_desc_lim lim = {
3408                 .nb_max = UINT16_MAX,
3409                 .nb_min = 0,
3410                 .nb_align = 1,
3411                 .nb_seg_max = UINT16_MAX,
3412                 .nb_mtu_seg_max = UINT16_MAX,
3413         };
3414         int diag;
3415
3416         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3417         dev = &rte_eth_devices[port_id];
3418
3419         if (dev_info == NULL) {
3420                 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u info to NULL\n",
3421                         port_id);
3422                 return -EINVAL;
3423         }
3424
3425         /*
3426          * Initialise dev_info with safe default values first so that
3427          * fields the driver callback does not fill in stay well defined.
3428          */
3429         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3430         dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
3431
3432         dev_info->rx_desc_lim = lim;
3433         dev_info->tx_desc_lim = lim;
3434         dev_info->device = dev->device;
3435         dev_info->min_mtu = RTE_ETHER_MIN_MTU;
3436         dev_info->max_mtu = UINT16_MAX;
3437
3438         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
3439         diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info);
3440         if (diag != 0) {
3441                 /* Cleanup already filled in device information */
3442                 memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3443                 return eth_err(port_id, diag);
3444         }
3445
3446         /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */
3447         dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues,
3448                         RTE_MAX_QUEUES_PER_PORT);
3449         dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues,
3450                         RTE_MAX_QUEUES_PER_PORT);
3451
3452         dev_info->driver_name = dev->device->driver->name;
3453         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
3454         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
3455
3456         dev_info->dev_flags = &dev->data->dev_flags;
3457
3458         return 0;
3459 }
3460
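/*
 * Illustrative usage sketch (not part of this file): a common use of
 * rte_eth_dev_info_get() is clamping the application's requested queue
 * counts before configuring the port. "wanted_rxq" and "wanted_txq" are
 * hypothetical application values.
 *
 *	struct rte_eth_dev_info info;
 *	struct rte_eth_conf conf = { 0 };
 *
 *	if (rte_eth_dev_info_get(port_id, &info) == 0) {
 *		uint16_t nb_rxq = RTE_MIN(wanted_rxq, info.max_rx_queues);
 *		uint16_t nb_txq = RTE_MIN(wanted_txq, info.max_tx_queues);
 *
 *		rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *	}
 */
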
3461 int
3462 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3463                                  uint32_t *ptypes, int num)
3464 {
3465         int i, j;
3466         struct rte_eth_dev *dev;
3467         const uint32_t *all_ptypes;
3468
3469         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3470         dev = &rte_eth_devices[port_id];
3471
3472         if (ptypes == NULL && num > 0) {
3473                 RTE_ETHDEV_LOG(ERR,
3474                         "Cannot get ethdev port %u supported packet types to NULL when array size is non zero\n",
3475                         port_id);
3476                 return -EINVAL;
3477         }
3478
3479         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
3480         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3481
3482         if (!all_ptypes)
3483                 return 0;
3484
3485         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
3486                 if (all_ptypes[i] & ptype_mask) {
3487                         if (j < num)
3488                                 ptypes[j] = all_ptypes[i];
3489                         j++;
3490                 }
3491
3492         return j;
3493 }
3494
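/*
 * Illustrative usage sketch (not part of this file): the two-call
 * pattern for the function above; calling with num == 0 just counts the
 * ptypes matching the mask.
 *
 *	int n = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_L4_MASK,
 *			NULL, 0);
 *
 *	if (n > 0) {
 *		uint32_t *ptypes = malloc(n * sizeof(*ptypes));
 *
 *		n = rte_eth_dev_get_supported_ptypes(port_id,
 *				RTE_PTYPE_L4_MASK, ptypes, n);
 *		... inspect ptypes[0..n-1] ...
 *		free(ptypes);
 *	}
 */
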
3495 int
3496 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3497                                  uint32_t *set_ptypes, unsigned int num)
3498 {
3499         const uint32_t valid_ptype_masks[] = {
3500                 RTE_PTYPE_L2_MASK,
3501                 RTE_PTYPE_L3_MASK,
3502                 RTE_PTYPE_L4_MASK,
3503                 RTE_PTYPE_TUNNEL_MASK,
3504                 RTE_PTYPE_INNER_L2_MASK,
3505                 RTE_PTYPE_INNER_L3_MASK,
3506                 RTE_PTYPE_INNER_L4_MASK,
3507         };
3508         const uint32_t *all_ptypes;
3509         struct rte_eth_dev *dev;
3510         uint32_t unused_mask;
3511         unsigned int i, j;
3512         int ret;
3513
3514         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3515         dev = &rte_eth_devices[port_id];
3516
3517         if (num > 0 && set_ptypes == NULL) {
3518                 RTE_ETHDEV_LOG(ERR,
3519                         "Cannot get ethdev port %u set packet types to NULL when array size is non zero\n",
3520                         port_id);
3521                 return -EINVAL;
3522         }
3523
3524         if (*dev->dev_ops->dev_supported_ptypes_get == NULL ||
3525                         *dev->dev_ops->dev_ptypes_set == NULL) {
3526                 ret = 0;
3527                 goto ptype_unknown;
3528         }
3529
3530         if (ptype_mask == 0) {
3531                 ret = (*dev->dev_ops->dev_ptypes_set)(dev,
3532                                 ptype_mask);
3533                 goto ptype_unknown;
3534         }
3535
3536         unused_mask = ptype_mask;
3537         for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) {
3538                 uint32_t mask = ptype_mask & valid_ptype_masks[i];
3539                 if (mask && mask != valid_ptype_masks[i]) {
3540                         ret = -EINVAL;
3541                         goto ptype_unknown;
3542                 }
3543                 unused_mask &= ~valid_ptype_masks[i];
3544         }
3545
3546         if (unused_mask) {
3547                 ret = -EINVAL;
3548                 goto ptype_unknown;
3549         }
3550
3551         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3552         if (all_ptypes == NULL) {
3553                 ret = 0;
3554                 goto ptype_unknown;
3555         }
3556
3557         /*
3558          * Accommodate as many set_ptypes as possible. If the supplied
3559          * set_ptypes array is insufficient fill it partially.
3560          */
3561         for (i = 0, j = 0; set_ptypes != NULL &&
3562                                 (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) {
3563                 if (ptype_mask & all_ptypes[i]) {
3564                         if (j + 1 < num) {
3565                                 set_ptypes[j] = all_ptypes[i];
3566                                 j++;
3567                                 continue;
3568                         }
3569                         break;
3570                 }
3571         }
3572
3573         if (set_ptypes != NULL && j < num)
3574                 set_ptypes[j] = RTE_PTYPE_UNKNOWN;
3575
3576         return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask);
3577
3578 ptype_unknown:
3579         if (num > 0)
3580                 set_ptypes[0] = RTE_PTYPE_UNKNOWN;
3581
3582         return ret;
3583 }
3584
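/*
 * Illustrative usage sketch (not part of this file): because
 * RTE_PTYPE_UNKNOWN is 0, a zero mask takes the ptype_mask == 0 shortcut
 * above and asks the PMD to report no packet types, which lets it skip
 * ptype parsing altogether.
 *
 *	rte_eth_dev_set_ptypes(port_id, RTE_PTYPE_UNKNOWN, NULL, 0);
 */
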
3585 int
3586 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
3587 {
3588         struct rte_eth_dev *dev;
3589
3590         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3591         dev = &rte_eth_devices[port_id];
3592
3593         if (mac_addr == NULL) {
3594                 RTE_ETHDEV_LOG(ERR,
3595                         "Cannot get ethdev port %u MAC address to NULL\n",
3596                         port_id);
3597                 return -EINVAL;
3598         }
3599
3600         rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
3601
3602         return 0;
3603 }
3604
3605 int
3606 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
3607 {
3608         struct rte_eth_dev *dev;
3609
3610         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3611         dev = &rte_eth_devices[port_id];
3612
3613         if (mtu == NULL) {
3614                 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u MTU to NULL\n",
3615                         port_id);
3616                 return -EINVAL;
3617         }
3618
3619         *mtu = dev->data->mtu;
3620         return 0;
3621 }
3622
3623 int
3624 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
3625 {
3626         int ret;
3627         struct rte_eth_dev_info dev_info;
3628         struct rte_eth_dev *dev;
3629
3630         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3631         dev = &rte_eth_devices[port_id];
3632         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
3633
3634         /*
3635          * Check if the device supports dev_infos_get, if it does not
3636          * skip min_mtu/max_mtu validation here as this requires values
3637          * that are populated within the call to rte_eth_dev_info_get()
3638          * which relies on dev->dev_ops->dev_infos_get.
3639          */
3640         if (*dev->dev_ops->dev_infos_get != NULL) {
3641                 ret = rte_eth_dev_info_get(port_id, &dev_info);
3642                 if (ret != 0)
3643                         return ret;
3644
3645                 if (mtu < dev_info.min_mtu || mtu > dev_info.max_mtu)
3646                         return -EINVAL;
3647         }
3648
3649         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
3650         if (!ret)
3651                 dev->data->mtu = mtu;
3652
3653         return eth_err(port_id, ret);
3654 }
3655
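/*
 * Illustrative usage sketch (not part of this file): choosing an MTU
 * within the driver-reported range before applying it, mirroring the
 * min_mtu/max_mtu validation above. The 9000-byte jumbo value is an
 * arbitrary example.
 *
 *	struct rte_eth_dev_info info;
 *
 *	if (rte_eth_dev_info_get(port_id, &info) == 0) {
 *		uint16_t mtu = RTE_MIN((uint16_t)9000, info.max_mtu);
 *
 *		if (mtu >= info.min_mtu)
 *			rte_eth_dev_set_mtu(port_id, mtu);
 *	}
 */
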
3656 int
3657 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
3658 {
3659         struct rte_eth_dev *dev;
3660         int ret;
3661
3662         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3663         dev = &rte_eth_devices[port_id];
3664
3665         if (!(dev->data->dev_conf.rxmode.offloads &
3666               DEV_RX_OFFLOAD_VLAN_FILTER)) {
3667                 RTE_ETHDEV_LOG(ERR, "Port %u: vlan-filtering disabled\n",
3668                         port_id);
3669                 return -ENOSYS;
3670         }
3671
3672         if (vlan_id > 4095) {
3673                 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n",
3674                         port_id, vlan_id);
3675                 return -EINVAL;
3676         }
3677         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
3678
3679         ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
3680         if (ret == 0) {
3681                 struct rte_vlan_filter_conf *vfc;
3682                 int vidx;
3683                 int vbit;
3684
3685                 vfc = &dev->data->vlan_filter_conf;
3686                 vidx = vlan_id / 64;
3687                 vbit = vlan_id % 64;
3688
3689                 if (on)
3690                         vfc->ids[vidx] |= UINT64_C(1) << vbit;
3691                 else
3692                         vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
3693         }
3694
3695         return eth_err(port_id, ret);
3696 }
3697
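/*
 * Illustrative usage sketch (not part of this file): VLAN filtering must
 * be enabled in rxmode.offloads at configure time (checked above) before
 * individual VLAN IDs can be added. "nb_rxq"/"nb_txq" are hypothetical
 * queue counts and 100 is an arbitrary example VID.
 *
 *	struct rte_eth_conf conf = { 0 };
 *
 *	conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *	... start the port ...
 *	rte_eth_dev_vlan_filter(port_id, 100, 1);
 */
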
3698 int
3699 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3700                                     int on)
3701 {
3702         struct rte_eth_dev *dev;
3703
3704         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3705         dev = &rte_eth_devices[port_id];
3706
3707         if (rx_queue_id >= dev->data->nb_rx_queues) {
3708                 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id);
3709                 return -EINVAL;
3710         }
3711
3712         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
3713         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
3714
3715         return 0;
3716 }
3717
3718 int
3719 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
3720                                 enum rte_vlan_type vlan_type,
3721                                 uint16_t tpid)
3722 {
3723         struct rte_eth_dev *dev;
3724
3725         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3726         dev = &rte_eth_devices[port_id];
3727
3728         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
3729         return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
3730                                                                tpid));
3731 }
3732
3733 int
3734 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
3735 {
3736         struct rte_eth_dev_info dev_info;
3737         struct rte_eth_dev *dev;
3738         int ret = 0;
3739         int mask = 0;
3740         int cur, org = 0;
3741         uint64_t orig_offloads;
3742         uint64_t dev_offloads;
3743         uint64_t new_offloads;
3744
3745         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3746         dev = &rte_eth_devices[port_id];
3747
3748         /* save original values in case of failure */
3749         orig_offloads = dev->data->dev_conf.rxmode.offloads;
3750         dev_offloads = orig_offloads;
3751
3752         /* check which option changed by application */
3753         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
3754         org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
3755         if (cur != org) {
3756                 if (cur)
3757                         dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
3758                 else
3759                         dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
3760                 mask |= ETH_VLAN_STRIP_MASK;
3761         }
3762
3763         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
3764         org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
3765         if (cur != org) {
3766                 if (cur)
3767                         dev_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3768                 else
3769                         dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
3770                 mask |= ETH_VLAN_FILTER_MASK;
3771         }
3772
3773         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
3774         org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND);
3775         if (cur != org) {
3776                 if (cur)
3777                         dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
3778                 else
3779                         dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
3780                 mask |= ETH_VLAN_EXTEND_MASK;
3781         }
3782
3783         cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD);
3784         org = !!(dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP);
3785         if (cur != org) {
3786                 if (cur)
3787                         dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
3788                 else
3789                         dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
3790                 mask |= ETH_QINQ_STRIP_MASK;
3791         }
3792
3793         /* no change */
3794         if (mask == 0)
3795                 return ret;
3796
3797         ret = rte_eth_dev_info_get(port_id, &dev_info);
3798         if (ret != 0)
3799                 return ret;
3800
3801         /* Rx VLAN offloading must be within its device capabilities */
3802         if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) {
3803                 new_offloads = dev_offloads & ~orig_offloads;
3804                 RTE_ETHDEV_LOG(ERR,
3805                         "Ethdev port_id=%u requested new added VLAN offloads "
3806                         "0x%" PRIx64 " must be within Rx offloads capabilities "
3807                         "0x%" PRIx64 " in %s()\n",
3808                         port_id, new_offloads, dev_info.rx_offload_capa,
3809                         __func__);
3810                 return -EINVAL;
3811         }
3812
3813         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
3814         dev->data->dev_conf.rxmode.offloads = dev_offloads;
3815         ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
3816         if (ret) {
3817                 /* hit an error, restore the original values */
3818                 dev->data->dev_conf.rxmode.offloads = orig_offloads;
3819         }
3820
3821         return eth_err(port_id, ret);
3822 }
3823
3824 int
3825 rte_eth_dev_get_vlan_offload(uint16_t port_id)
3826 {
3827         struct rte_eth_dev *dev;
3828         uint64_t *dev_offloads;
3829         int ret = 0;
3830
3831         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3832         dev = &rte_eth_devices[port_id];
3833         dev_offloads = &dev->data->dev_conf.rxmode.offloads;
3834
3835         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
3836                 ret |= ETH_VLAN_STRIP_OFFLOAD;
3837
3838         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
3839                 ret |= ETH_VLAN_FILTER_OFFLOAD;
3840
3841         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
3842                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
3843
3844         if (*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
3845                 ret |= ETH_QINQ_STRIP_OFFLOAD;
3846
3847         return ret;
3848 }
3849
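/*
 * Illustrative usage sketch (not part of this file): the get/set pair
 * above allows read-modify-write of the VLAN offload flags, e.g.
 * enabling stripping without disturbing the other bits.
 *
 *	int mask = rte_eth_dev_get_vlan_offload(port_id);
 *
 *	if (mask >= 0) {
 *		mask |= ETH_VLAN_STRIP_OFFLOAD;
 *		rte_eth_dev_set_vlan_offload(port_id, mask);
 *	}
 */
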
3850 int
3851 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
3852 {
3853         struct rte_eth_dev *dev;
3854
3855         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3856         dev = &rte_eth_devices[port_id];
3857
3858         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
3859         return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
3860 }
3861
3862 int
3863 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3864 {
3865         struct rte_eth_dev *dev;
3866
3867         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3868         dev = &rte_eth_devices[port_id];
3869
3870         if (fc_conf == NULL) {
3871                 RTE_ETHDEV_LOG(ERR,
3872                         "Cannot get ethdev port %u flow control config to NULL\n",
3873                         port_id);
3874                 return -EINVAL;
3875         }
3876
3877         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
3878         memset(fc_conf, 0, sizeof(*fc_conf));
3879         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
3880 }
3881
3882 int
3883 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3884 {
3885         struct rte_eth_dev *dev;
3886
3887         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3888         dev = &rte_eth_devices[port_id];
3889
3890         if (fc_conf == NULL) {
3891                 RTE_ETHDEV_LOG(ERR,
3892                         "Cannot set ethdev port %u flow control from NULL config\n",
3893                         port_id);
3894                 return -EINVAL;
3895         }
3896
3897         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
3898                 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n");
3899                 return -EINVAL;
3900         }
3901
3902         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
3903         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
3904 }
3905
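/*
 * Illustrative usage sketch (not part of this file): read-modify-write
 * of the flow control configuration, switching to full (Rx and Tx)
 * pause-frame handling.
 *
 *	struct rte_eth_fc_conf fc;
 *
 *	if (rte_eth_dev_flow_ctrl_get(port_id, &fc) == 0) {
 *		fc.mode = RTE_FC_FULL;
 *		rte_eth_dev_flow_ctrl_set(port_id, &fc);
 *	}
 */
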
3906 int
3907 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
3908                                    struct rte_eth_pfc_conf *pfc_conf)
3909 {
3910         struct rte_eth_dev *dev;
3911
3912         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3913         dev = &rte_eth_devices[port_id];
3914
3915         if (pfc_conf == NULL) {
3916                 RTE_ETHDEV_LOG(ERR,
3917                         "Cannot set ethdev port %u priority flow control from NULL config\n",
3918                         port_id);
3919                 return -EINVAL;
3920         }
3921
3922         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
3923                 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
3924                 return -EINVAL;
3925         }
3926
3927         /* High water and low water validation is device-specific */
3928         if (*dev->dev_ops->priority_flow_ctrl_set)
3929                 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
3930                                         (dev, pfc_conf));
3931         return -ENOTSUP;
3932 }
3933
3934 static int
3935 eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
3936                         uint16_t reta_size)
3937 {
3938         uint16_t i, num;
3939
3940         num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
3941         for (i = 0; i < num; i++) {
3942                 if (reta_conf[i].mask)
3943                         return 0;
3944         }
3945
3946         return -EINVAL;
3947 }
3948
3949 static int
3950 eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
3951                          uint16_t reta_size,
3952                          uint16_t max_rxq)
3953 {
3954         uint16_t i, idx, shift;
3955
3956         if (max_rxq == 0) {
3957                 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n");
3958                 return -EINVAL;
3959         }
3960
3961         for (i = 0; i < reta_size; i++) {
3962                 idx = i / RTE_RETA_GROUP_SIZE;
3963                 shift = i % RTE_RETA_GROUP_SIZE;
3964                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
3965                         (reta_conf[idx].reta[shift] >= max_rxq)) {
3966                         RTE_ETHDEV_LOG(ERR,
3967                                 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n",
3968                                 idx, shift,
3969                                 reta_conf[idx].reta[shift], max_rxq);
3970                         return -EINVAL;
3971                 }
3972         }
3973
3974         return 0;
3975 }
3976
3977 int
3978 rte_eth_dev_rss_reta_update(uint16_t port_id,
3979                             struct rte_eth_rss_reta_entry64 *reta_conf,
3980                             uint16_t reta_size)
3981 {
3982         struct rte_eth_dev *dev;
3983         int ret;
3984
3985         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3986         dev = &rte_eth_devices[port_id];
3987
3988         if (reta_conf == NULL) {
3989                 RTE_ETHDEV_LOG(ERR,
3990                         "Cannot update ethdev port %u RSS RETA to NULL\n",
3991                         port_id);
3992                 return -EINVAL;
3993         }
3994
3995         if (reta_size == 0) {
3996                 RTE_ETHDEV_LOG(ERR,
3997                         "Cannot update ethdev port %u RSS RETA with zero size\n",
3998                         port_id);
3999                 return -EINVAL;
4000         }
4001
4002         /* Check mask bits */
4003         ret = eth_check_reta_mask(reta_conf, reta_size);
4004         if (ret < 0)
4005                 return ret;
4006
4007         /* Check entry value */
4008         ret = eth_check_reta_entry(reta_conf, reta_size,
4009                                 dev->data->nb_rx_queues);
4010         if (ret < 0)
4011                 return ret;
4012
4013         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
4014         return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
4015                                                              reta_size));
4016 }
4017
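/*
 * Illustrative usage sketch (not part of this file): spreading the RETA
 * evenly across "nb_rxq" queues. Each 64-entry group carries its own
 * mask, as eth_check_reta_mask() above expects; "RETA_SIZE" stands in
 * for the dev_info.reta_size reported by the driver.
 *
 *	struct rte_eth_rss_reta_entry64 reta[RETA_SIZE / RTE_RETA_GROUP_SIZE];
 *	uint16_t i;
 *
 *	memset(reta, 0, sizeof(reta));
 *	for (i = 0; i < RETA_SIZE; i++) {
 *		uint16_t idx = i / RTE_RETA_GROUP_SIZE;
 *		uint16_t shift = i % RTE_RETA_GROUP_SIZE;
 *
 *		reta[idx].mask |= UINT64_C(1) << shift;
 *		reta[idx].reta[shift] = i % nb_rxq;
 *	}
 *	rte_eth_dev_rss_reta_update(port_id, reta, RETA_SIZE);
 */
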
4018 int
4019 rte_eth_dev_rss_reta_query(uint16_t port_id,
4020                            struct rte_eth_rss_reta_entry64 *reta_conf,
4021                            uint16_t reta_size)
4022 {
4023         struct rte_eth_dev *dev;
4024         int ret;
4025
4026         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4027         dev = &rte_eth_devices[port_id];
4028
4029         if (reta_conf == NULL) {
4030                 RTE_ETHDEV_LOG(ERR,
4031                         "Cannot query ethdev port %u RSS RETA from NULL config\n",
4032                         port_id);
4033                 return -EINVAL;
4034         }
4035
4036         /* Check mask bits */
4037         ret = eth_check_reta_mask(reta_conf, reta_size);
4038         if (ret < 0)
4039                 return ret;
4040
4041         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
4042         return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
4043                                                             reta_size));
4044 }
4045
4046 int
4047 rte_eth_dev_rss_hash_update(uint16_t port_id,
4048                             struct rte_eth_rss_conf *rss_conf)
4049 {
4050         struct rte_eth_dev *dev;
4051         struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
4052         int ret;
4053
4054         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4055         dev = &rte_eth_devices[port_id];
4056
4057         if (rss_conf == NULL) {
4058                 RTE_ETHDEV_LOG(ERR,
4059                         "Cannot update ethdev port %u RSS hash from NULL config\n",
4060                         port_id);
4061                 return -EINVAL;
4062         }
4063
4064         ret = rte_eth_dev_info_get(port_id, &dev_info);
4065         if (ret != 0)
4066                 return ret;
4067
4068         rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf);
4069         if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
4070             dev_info.flow_type_rss_offloads) {
4071                 RTE_ETHDEV_LOG(ERR,
4072                         "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
4073                         port_id, rss_conf->rss_hf,
4074                         dev_info.flow_type_rss_offloads);
4075                 return -EINVAL;
4076         }
4077         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
4078         return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
4079                                                                  rss_conf));
4080 }
4081
4082 int
4083 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
4084                               struct rte_eth_rss_conf *rss_conf)
4085 {
4086         struct rte_eth_dev *dev;
4087
4088         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4089         dev = &rte_eth_devices[port_id];
4090
4091         if (rss_conf == NULL) {
4092                 RTE_ETHDEV_LOG(ERR,
4093                         "Cannot get ethdev port %u RSS hash config to NULL\n",
4094                         port_id);
4095                 return -EINVAL;
4096         }
4097
4098         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
4099         return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
4100                                                                    rss_conf));
4101 }
4102
4103 int
4104 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
4105                                 struct rte_eth_udp_tunnel *udp_tunnel)
4106 {
4107         struct rte_eth_dev *dev;
4108
4109         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4110         dev = &rte_eth_devices[port_id];
4111
4112         if (udp_tunnel == NULL) {
4113                 RTE_ETHDEV_LOG(ERR,
4114                         "Cannot add ethdev port %u UDP tunnel port from NULL UDP tunnel\n",
4115                         port_id);
4116                 return -EINVAL;
4117         }
4118
4119         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
4120                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4121                 return -EINVAL;
4122         }
4123
4124         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
4125         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
4126                                                                 udp_tunnel));
4127 }
4128
4129 int
4130 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
4131                                    struct rte_eth_udp_tunnel *udp_tunnel)
4132 {
4133         struct rte_eth_dev *dev;
4134
4135         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4136         dev = &rte_eth_devices[port_id];
4137
4138         if (udp_tunnel == NULL) {
4139                 RTE_ETHDEV_LOG(ERR,
4140                         "Cannot delete ethdev port %u UDP tunnel port from NULL UDP tunnel\n",
4141                         port_id);
4142                 return -EINVAL;
4143         }
4144
4145         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
4146                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4147                 return -EINVAL;
4148         }
4149
4150         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
4151         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
4152                                                                 udp_tunnel));
4153 }
4154
4155 int
4156 rte_eth_led_on(uint16_t port_id)
4157 {
4158         struct rte_eth_dev *dev;
4159
4160         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4161         dev = &rte_eth_devices[port_id];
4162
4163         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
4164         return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
4165 }
4166
4167 int
4168 rte_eth_led_off(uint16_t port_id)
4169 {
4170         struct rte_eth_dev *dev;
4171
4172         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4173         dev = &rte_eth_devices[port_id];
4174
4175         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
4176         return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
4177 }
4178
4179 int
4180 rte_eth_fec_get_capability(uint16_t port_id,
4181                            struct rte_eth_fec_capa *speed_fec_capa,
4182                            unsigned int num)
4183 {
4184         struct rte_eth_dev *dev;
4185         int ret;
4186
4187         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4188         dev = &rte_eth_devices[port_id];
4189
4190         if (speed_fec_capa == NULL && num > 0) {
4191                 RTE_ETHDEV_LOG(ERR,
4192                         "Cannot get ethdev port %u FEC capability to NULL when array size is non zero\n",
4193                         port_id);
4194                 return -EINVAL;
4195         }
4196
4197         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get_capability, -ENOTSUP);
4198         ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num);
4199
4200         return ret;
4201 }
4202
4203 int
4204 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa)
4205 {
4206         struct rte_eth_dev *dev;
4207
4208         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4209         dev = &rte_eth_devices[port_id];
4210
4211         if (fec_capa == NULL) {
4212                 RTE_ETHDEV_LOG(ERR,
4213                         "Cannot get ethdev port %u current FEC mode to NULL\n",
4214                         port_id);
4215                 return -EINVAL;
4216         }
4217
4218         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get, -ENOTSUP);
4219         return eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa));
4220 }
4221
4222 int
4223 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa)
4224 {
4225         struct rte_eth_dev *dev;
4226
4227         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4228         dev = &rte_eth_devices[port_id];
4229
4230         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_set, -ENOTSUP);
4231         return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa));
4232 }
4233
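/*
 * Illustrative usage sketch (not part of this file): requesting
 * auto-negotiated FEC. rte_eth_fec_set() takes a capability bitmask
 * (built with the RTE_ETH_FEC_MODE_CAPA_MASK() helper from
 * rte_ethdev.h), not a bare mode value.
 *
 *	rte_eth_fec_set(port_id, RTE_ETH_FEC_MODE_CAPA_MASK(AUTO));
 */
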
4234 /*
4235  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
4236  * an empty spot.
4237  */
4238 static int
4239 eth_dev_get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
4240 {
4241         struct rte_eth_dev_info dev_info;
4242         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4243         unsigned i;
4244         int ret;
4245
4246         ret = rte_eth_dev_info_get(port_id, &dev_info);
4247         if (ret != 0)
4248                 return -1;
4249
4250         for (i = 0; i < dev_info.max_mac_addrs; i++)
4251                 if (memcmp(addr, &dev->data->mac_addrs[i],
4252                                 RTE_ETHER_ADDR_LEN) == 0)
4253                         return i;
4254
4255         return -1;
4256 }
4257
4258 static const struct rte_ether_addr null_mac_addr;
4259
4260 int
4261 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
4262                         uint32_t pool)
4263 {
4264         struct rte_eth_dev *dev;
4265         int index;
4266         uint64_t pool_mask;
4267         int ret;
4268
4269         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4270         dev = &rte_eth_devices[port_id];
4271
4272         if (addr == NULL) {
4273                 RTE_ETHDEV_LOG(ERR,
4274                         "Cannot add ethdev port %u MAC address from NULL address\n",
4275                         port_id);
4276                 return -EINVAL;
4277         }
4278
4279         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
4280
4281         if (rte_is_zero_ether_addr(addr)) {
4282                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
4283                         port_id);
4284                 return -EINVAL;
4285         }
4286         if (pool >= ETH_64_POOLS) {
4287                 RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", ETH_64_POOLS - 1);
4288                 return -EINVAL;
4289         }
4290
4291         index = eth_dev_get_mac_addr_index(port_id, addr);
4292         if (index < 0) {
4293                 index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr);
4294                 if (index < 0) {
4295                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
4296                                 port_id);
4297                         return -ENOSPC;
4298                 }
4299         } else {
4300                 pool_mask = dev->data->mac_pool_sel[index];
4301
4302                 /* Check if both MAC address and pool are already there; if so, do nothing */
4303                 if (pool_mask & (1ULL << pool))
4304                         return 0;
4305         }
4306
4307         /* Update NIC */
4308         ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
4309
4310         if (ret == 0) {
4311                 /* Update address in NIC data structure */
4312                 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]);
4313
4314                 /* Update pool bitmap in NIC data structure */
4315                 dev->data->mac_pool_sel[index] |= (1ULL << pool);
4316         }
4317
4318         return eth_err(port_id, ret);
4319 }
4320
4321 int
4322 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr)
4323 {
4324         struct rte_eth_dev *dev;
4325         int index;
4326
4327         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4328         dev = &rte_eth_devices[port_id];
4329
4330         if (addr == NULL) {
4331                 RTE_ETHDEV_LOG(ERR,
4332                         "Cannot remove ethdev port %u MAC address from NULL address\n",
4333                         port_id);
4334                 return -EINVAL;
4335         }
4336
4337         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
4338
4339         index = eth_dev_get_mac_addr_index(port_id, addr);
4340         if (index == 0) {
4341                 RTE_ETHDEV_LOG(ERR,
4342                         "Port %u: Cannot remove default MAC address\n",
4343                         port_id);
4344                 return -EADDRINUSE;
4345         } else if (index < 0)
4346                 return 0;  /* Do nothing if address wasn't found */
4347
4348         /* Update NIC */
4349         (*dev->dev_ops->mac_addr_remove)(dev, index);
4350
4351         /* Update address in NIC data structure */
4352         rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
4353
4354         /* reset pool bitmap */
4355         dev->data->mac_pool_sel[index] = 0;
4356
4357         return 0;
4358 }
4359
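/*
 * Illustrative usage sketch (not part of this file): adding a secondary
 * unicast address to pool 0 and removing it again. The locally
 * administered address below is an arbitrary example value.
 *
 *	struct rte_ether_addr mac = {
 *		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 } };
 *
 *	rte_eth_dev_mac_addr_add(port_id, &mac, 0);
 *	...
 *	rte_eth_dev_mac_addr_remove(port_id, &mac);
 */
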
4360 int
4361 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr)
4362 {
4363         struct rte_eth_dev *dev;
4364         int ret;
4365
4366         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4367         dev = &rte_eth_devices[port_id];
4368
4369         if (addr == NULL) {
4370                 RTE_ETHDEV_LOG(ERR,
4371                         "Cannot set ethdev port %u default MAC address from NULL address\n",
4372                         port_id);
4373                 return -EINVAL;
4374         }
4375
4376         if (!rte_is_valid_assigned_ether_addr(addr))
4377                 return -EINVAL;
4378
4379         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
4380
4381         ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
4382         if (ret < 0)
4383                 return ret;
4384
4385         /* Update default address in NIC data structure */
4386         rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
4387
4388         return 0;
4389 }
4390
4391
4392 /*
4393  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
4394  * an empty spot.
4395  */
4396 static int
4397 eth_dev_get_hash_mac_addr_index(uint16_t port_id,
4398                 const struct rte_ether_addr *addr)
4399 {
4400         struct rte_eth_dev_info dev_info;
4401         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4402         unsigned i;
4403         int ret;
4404
4405         ret = rte_eth_dev_info_get(port_id, &dev_info);
4406         if (ret != 0)
4407                 return -1;
4408
4409         if (!dev->data->hash_mac_addrs)
4410                 return -1;
4411
4412         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
4413                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
4414                         RTE_ETHER_ADDR_LEN) == 0)
4415                         return i;
4416
4417         return -1;
4418 }
4419
4420 int
4421 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
4422                                 uint8_t on)
4423 {
4424         int index;
4425         int ret;
4426         struct rte_eth_dev *dev;
4427
4428         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4429         dev = &rte_eth_devices[port_id];
4430
4431         if (addr == NULL) {
4432                 RTE_ETHDEV_LOG(ERR,
4433                         "Cannot set ethdev port %u unicast hash table from NULL address\n",
4434                         port_id);
4435                 return -EINVAL;
4436         }
4437
4438         if (rte_is_zero_ether_addr(addr)) {
4439                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
4440                         port_id);
4441                 return -EINVAL;
4442         }
4443
4444         index = eth_dev_get_hash_mac_addr_index(port_id, addr);
4445         /* Check if it's already there, and do nothing */
4446         if ((index >= 0) && on)
4447                 return 0;
4448
4449         if (index < 0) {
4450                 if (!on) {
4451                         RTE_ETHDEV_LOG(ERR,
4452                                 "Port %u: the MAC address was not set in UTA\n",
4453                                 port_id);
4454                         return -EINVAL;
4455                 }
4456
4457                 index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr);
4458                 if (index < 0) {
4459                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
4460                                 port_id);
4461                         return -ENOSPC;
4462                 }
4463         }
4464
4465         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
4466         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
4467         if (ret == 0) {
4468                 /* Update address in NIC data structure */
4469                 if (on)
4470                         rte_ether_addr_copy(addr,
4471                                         &dev->data->hash_mac_addrs[index]);
4472                 else
4473                         rte_ether_addr_copy(&null_mac_addr,
4474                                         &dev->data->hash_mac_addrs[index]);
4475         }
4476
4477         return eth_err(port_id, ret);
4478 }
4479
4480 int
4481 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
4482 {
4483         struct rte_eth_dev *dev;
4484
4485         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4486         dev = &rte_eth_devices[port_id];
4487
4488         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
4489         return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
4490                                                                        on));
4491 }
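
/*
 * Editor's note: a hedged sketch of the unicast hash (UTA) helpers above.
 * Driver support varies; falling back to hashing all unicast addresses on
 * -ENOTSUP is one possible policy, not a rule.
 *
 *	struct rte_ether_addr peer = {
 *		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x02 },
 *	};
 *
 *	if (rte_eth_dev_uc_hash_table_set(port_id, &peer, 1) == -ENOTSUP)
 *		rte_eth_dev_uc_all_hash_table_set(port_id, 1);
 */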
4492
4493 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
4494                                         uint16_t tx_rate)
4495 {
4496         struct rte_eth_dev *dev;
4497         struct rte_eth_dev_info dev_info;
4498         struct rte_eth_link link;
4499         int ret;
4500
4501         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4502         dev = &rte_eth_devices[port_id];
4503
4504         ret = rte_eth_dev_info_get(port_id, &dev_info);
4505         if (ret != 0)
4506                 return ret;
4507
4508         link = dev->data->dev_link;
4509
4510         if (queue_idx >= dev_info.max_tx_queues) {
4511                 RTE_ETHDEV_LOG(ERR,
4512                         "Set queue rate limit: port %u: invalid queue id=%u\n",
4513                         port_id, queue_idx);
4514                 return -EINVAL;
4515         }
4516
4517         if (tx_rate > link.link_speed) {
4518                 RTE_ETHDEV_LOG(ERR,
4519                         "Set queue rate limit: invalid tx_rate=%u, bigger than link speed=%u\n",
4520                         tx_rate, link.link_speed);
4521                 return -EINVAL;
4522         }
4523
4524         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
4525         return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
4526                                                         queue_idx, tx_rate));
4527 }
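
/*
 * Editor's note: sketch only. tx_rate is in Mbps and is checked against
 * the current link speed, so the link must be up before calling.
 *
 *	if (rte_eth_set_queue_rate_limit(port_id, 0, 1000) != 0)
 *		printf("cannot cap Tx queue 0 at 1 Gbps\n");
 */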
4528
4529 int
4530 rte_eth_mirror_rule_set(uint16_t port_id,
4531                         struct rte_eth_mirror_conf *mirror_conf,
4532                         uint8_t rule_id, uint8_t on)
4533 {
4534         struct rte_eth_dev *dev;
4535
4536         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4537         dev = &rte_eth_devices[port_id];
4538
4539         if (mirror_conf == NULL) {
4540                 RTE_ETHDEV_LOG(ERR,
4541                         "Cannot set ethdev port %u mirror rule from NULL config\n",
4542                         port_id);
4543                 return -EINVAL;
4544         }
4545
4546         if (mirror_conf->rule_type == 0) {
4547                 RTE_ETHDEV_LOG(ERR, "Mirror rule type cannot be 0\n");
4548                 return -EINVAL;
4549         }
4550
4551         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
4552                 RTE_ETHDEV_LOG(ERR, "Invalid dst pool, pool id must be 0-%d\n",
4553                         ETH_64_POOLS - 1);
4554                 return -EINVAL;
4555         }
4556
4557         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
4558              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
4559             (mirror_conf->pool_mask == 0)) {
4560                 RTE_ETHDEV_LOG(ERR,
4561                         "Invalid mirror pool, pool mask cannot be 0\n");
4562                 return -EINVAL;
4563         }
4564
4565         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
4566             mirror_conf->vlan.vlan_mask == 0) {
4567                 RTE_ETHDEV_LOG(ERR,
4568                         "Invalid vlan mask, vlan mask cannot be 0\n");
4569                 return -EINVAL;
4570         }
4571
4572         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
4573
4574         return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
4575                                                 mirror_conf, rule_id, on));
4576 }
4577
4578 int
4579 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
4580 {
4581         struct rte_eth_dev *dev;
4582
4583         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4584         dev = &rte_eth_devices[port_id];
4585
4586         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
4587         return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev, rule_id));
4588 }
4589
4590 RTE_INIT(eth_dev_init_cb_lists)
4591 {
4592         uint16_t i;
4593
4594         for (i = 0; i < RTE_MAX_ETHPORTS; i++)
4595                 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
4596 }
4597
4598 int
4599 rte_eth_dev_callback_register(uint16_t port_id,
4600                         enum rte_eth_event_type event,
4601                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4602 {
4603         struct rte_eth_dev *dev;
4604         struct rte_eth_dev_callback *user_cb;
4605         uint16_t next_port;
4606         uint16_t last_port;
4607
4608         if (cb_fn == NULL) {
4609                 RTE_ETHDEV_LOG(ERR,
4610                         "Cannot register ethdev port %u callback from NULL\n",
4611                         port_id);
4612                 return -EINVAL;
4613         }
4614
4615         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4616                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
4617                 return -EINVAL;
4618         }
4619
4620         if (port_id == RTE_ETH_ALL) {
4621                 next_port = 0;
4622                 last_port = RTE_MAX_ETHPORTS - 1;
4623         } else {
4624                 next_port = last_port = port_id;
4625         }
4626
4627         rte_spinlock_lock(&eth_dev_cb_lock);
4628
4629         do {
4630                 dev = &rte_eth_devices[next_port];
4631
4632                 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
4633                         if (user_cb->cb_fn == cb_fn &&
4634                                 user_cb->cb_arg == cb_arg &&
4635                                 user_cb->event == event) {
4636                                 break;
4637                         }
4638                 }
4639
4640                 /* create a new callback. */
4641                 if (user_cb == NULL) {
4642                         user_cb = rte_zmalloc("INTR_USER_CALLBACK",
4643                                 sizeof(struct rte_eth_dev_callback), 0);
4644                         if (user_cb != NULL) {
4645                                 user_cb->cb_fn = cb_fn;
4646                                 user_cb->cb_arg = cb_arg;
4647                                 user_cb->event = event;
4648                                 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
4649                                                   user_cb, next);
4650                         } else {
4651                                 rte_spinlock_unlock(&eth_dev_cb_lock);
4652                                 rte_eth_dev_callback_unregister(port_id, event,
4653                                                                 cb_fn, cb_arg);
4654                                 return -ENOMEM;
4655                         }
4656
4657                 }
4658         } while (++next_port <= last_port);
4659
4660         rte_spinlock_unlock(&eth_dev_cb_lock);
4661         return 0;
4662 }
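
/*
 * Editor's note: sketch of registering a link-status callback on every
 * port; "on_link_change" is a hypothetical handler matching
 * rte_eth_dev_cb_fn.
 *
 *	static int
 *	on_link_change(uint16_t port_id, enum rte_eth_event_type event,
 *			void *cb_arg, void *ret_param)
 *	{
 *		RTE_SET_USED(event);
 *		RTE_SET_USED(cb_arg);
 *		RTE_SET_USED(ret_param);
 *		printf("port %u link state changed\n", port_id);
 *		return 0;
 *	}
 *
 *	rte_eth_dev_callback_register(RTE_ETH_ALL, RTE_ETH_EVENT_INTR_LSC,
 *			on_link_change, NULL);
 */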
4663
4664 int
4665 rte_eth_dev_callback_unregister(uint16_t port_id,
4666                         enum rte_eth_event_type event,
4667                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4668 {
4669         int ret;
4670         struct rte_eth_dev *dev;
4671         struct rte_eth_dev_callback *cb, *next;
4672         uint16_t next_port;
4673         uint16_t last_port;
4674
4675         if (cb_fn == NULL) {
4676                 RTE_ETHDEV_LOG(ERR,
4677                         "Cannot unregister ethdev port %u callback from NULL\n",
4678                         port_id);
4679                 return -EINVAL;
4680         }
4681
4682         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4683                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
4684                 return -EINVAL;
4685         }
4686
4687         if (port_id == RTE_ETH_ALL) {
4688                 next_port = 0;
4689                 last_port = RTE_MAX_ETHPORTS - 1;
4690         } else {
4691                 next_port = last_port = port_id;
4692         }
4693
4694         rte_spinlock_lock(&eth_dev_cb_lock);
4695
4696         do {
4697                 dev = &rte_eth_devices[next_port];
4698                 ret = 0;
4699                 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
4700                      cb = next) {
4701
4702                         next = TAILQ_NEXT(cb, next);
4703
4704                         if (cb->cb_fn != cb_fn || cb->event != event ||
4705                             (cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
4706                                 continue;
4707
4708                         /*
4709                          * if this callback is not executing right now,
4710                          * then remove it.
4711                          */
4712                         if (cb->active == 0) {
4713                                 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
4714                                 rte_free(cb);
4715                         } else {
4716                                 ret = -EAGAIN;
4717                         }
4718                 }
4719         } while (++next_port <= last_port);
4720
4721         rte_spinlock_unlock(&eth_dev_cb_lock);
4722         return ret;
4723 }
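
/*
 * Editor's note: per the matching logic above, passing cb_arg == (void *)-1
 * acts as a wildcard and removes the callback whatever argument it was
 * registered with; -EAGAIN means a matching callback is executing right now.
 *
 *	rte_eth_dev_callback_unregister(RTE_ETH_ALL, RTE_ETH_EVENT_INTR_LSC,
 *			on_link_change, (void *)-1);
 */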
4724
4725 int
4726 rte_eth_dev_callback_process(struct rte_eth_dev *dev,
4727         enum rte_eth_event_type event, void *ret_param)
4728 {
4729         struct rte_eth_dev_callback *cb_lst;
4730         struct rte_eth_dev_callback dev_cb;
4731         int rc = 0;
4732
4733         rte_spinlock_lock(&eth_dev_cb_lock);
4734         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
4735                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
4736                         continue;
4737                 dev_cb = *cb_lst;
4738                 cb_lst->active = 1;
4739                 if (ret_param != NULL)
4740                         dev_cb.ret_param = ret_param;
4741
4742                 rte_spinlock_unlock(&eth_dev_cb_lock);
4743                 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
4744                                 dev_cb.cb_arg, dev_cb.ret_param);
4745                 rte_spinlock_lock(&eth_dev_cb_lock);
4746                 cb_lst->active = 0;
4747         }
4748         rte_spinlock_unlock(&eth_dev_cb_lock);
4749         return rc;
4750 }
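
/*
 * Editor's note: this is the driver-facing half of the callback machinery;
 * a PMD interrupt handler would typically post an event with:
 *
 *	rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
 */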
4751
4752 void
4753 rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
4754 {
4755         if (dev == NULL)
4756                 return;
4757
4758         rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);
4759
4760         dev->state = RTE_ETH_DEV_ATTACHED;
4761 }
4762
4763 int
4764 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
4765 {
4766         uint32_t vec;
4767         struct rte_eth_dev *dev;
4768         struct rte_intr_handle *intr_handle;
4769         uint16_t qid;
4770         int rc;
4771
4772         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4773         dev = &rte_eth_devices[port_id];
4774
4775         if (!dev->intr_handle) {
4776                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4777                 return -ENOTSUP;
4778         }
4779
4780         intr_handle = dev->intr_handle;
4781         if (!intr_handle->intr_vec) {
4782                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4783                 return -EPERM;
4784         }
4785
4786         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
4787                 vec = intr_handle->intr_vec[qid];
4788                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4789                 if (rc && rc != -EEXIST) {
4790                         RTE_ETHDEV_LOG(ERR,
4791                                 "p %u q %u rx ctl error op %d epfd %d vec %u\n",
4792                                 port_id, qid, op, epfd, vec);
4793                 }
4794         }
4795
4796         return 0;
4797 }
4798
4799 int
4800 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
4801 {
4802         struct rte_intr_handle *intr_handle;
4803         struct rte_eth_dev *dev;
4804         unsigned int efd_idx;
4805         uint32_t vec;
4806         int fd;
4807
4808         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
4809         dev = &rte_eth_devices[port_id];
4810
4811         if (queue_id >= dev->data->nb_rx_queues) {
4812                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4813                 return -1;
4814         }
4815
4816         if (!dev->intr_handle) {
4817                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4818                 return -1;
4819         }
4820
4821         intr_handle = dev->intr_handle;
4822         if (!intr_handle->intr_vec) {
4823                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4824                 return -1;
4825         }
4826
4827         vec = intr_handle->intr_vec[queue_id];
4828         efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
4829                 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
4830         fd = intr_handle->efds[efd_idx];
4831
4832         return fd;
4833 }
4834
4835 static inline int
4836 eth_dev_dma_mzone_name(char *name, size_t len, uint16_t port_id, uint16_t queue_id,
4837                 const char *ring_name)
4838 {
4839         return snprintf(name, len, "eth_p%d_q%d_%s",
4840                         port_id, queue_id, ring_name);
4841 }
4842
4843 const struct rte_memzone *
4844 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
4845                          uint16_t queue_id, size_t size, unsigned align,
4846                          int socket_id)
4847 {
4848         char z_name[RTE_MEMZONE_NAMESIZE];
4849         const struct rte_memzone *mz;
4850         int rc;
4851
4852         rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
4853                         queue_id, ring_name);
4854         if (rc >= RTE_MEMZONE_NAMESIZE) {
4855                 RTE_ETHDEV_LOG(ERR, "ring name too long\n");
4856                 rte_errno = ENAMETOOLONG;
4857                 return NULL;
4858         }
4859
4860         mz = rte_memzone_lookup(z_name);
4861         if (mz) {
4862                 if ((socket_id != SOCKET_ID_ANY && socket_id != mz->socket_id) ||
4863                                 size > mz->len ||
4864                                 ((uintptr_t)mz->addr & (align - 1)) != 0) {
4865                         RTE_ETHDEV_LOG(ERR,
4866                                 "memzone %s does not satisfy the requested attributes\n",
4867                                 mz->name);
4868                         return NULL;
4869                 }
4870
4871                 return mz;
4872         }
4873
4874         return rte_memzone_reserve_aligned(z_name, size, socket_id,
4875                         RTE_MEMZONE_IOVA_CONTIG, align);
4876 }
4877
4878 int
4879 rte_eth_dma_zone_free(const struct rte_eth_dev *dev, const char *ring_name,
4880                 uint16_t queue_id)
4881 {
4882         char z_name[RTE_MEMZONE_NAMESIZE];
4883         const struct rte_memzone *mz;
4884         int rc = 0;
4885
4886         rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
4887                         queue_id, ring_name);
4888         if (rc >= RTE_MEMZONE_NAMESIZE) {
4889                 RTE_ETHDEV_LOG(ERR, "ring name too long\n");
4890                 return -ENAMETOOLONG;
4891         }
4892
4893         mz = rte_memzone_lookup(z_name);
4894         if (mz)
4895                 rc = rte_memzone_free(mz);
4896         else
4897                 rc = -ENOENT;
4898
4899         return rc;
4900 }
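
/*
 * Editor's note: hedged sketch of a PMD queue-setup path built on the two
 * helpers above; "ring_size" and "socket_id" are hypothetical locals.
 *
 *	const struct rte_memzone *mz;
 *
 *	mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_id, ring_size,
 *			RTE_CACHE_LINE_SIZE, socket_id);
 *	if (mz == NULL)
 *		return -ENOMEM;
 *
 * with the matching teardown being:
 *
 *	rte_eth_dma_zone_free(dev, "tx_ring", queue_id);
 */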
4901
4902 int
4903 rte_eth_dev_create(struct rte_device *device, const char *name,
4904         size_t priv_data_size,
4905         ethdev_bus_specific_init ethdev_bus_specific_init,
4906         void *bus_init_params,
4907         ethdev_init_t ethdev_init, void *init_params)
4908 {
4909         struct rte_eth_dev *ethdev;
4910         int retval;
4911
4912         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL);
4913
4914         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
4915                 ethdev = rte_eth_dev_allocate(name);
4916                 if (!ethdev)
4917                         return -ENODEV;
4918
4919                 if (priv_data_size) {
4920                         ethdev->data->dev_private = rte_zmalloc_socket(
4921                                 name, priv_data_size, RTE_CACHE_LINE_SIZE,
4922                                 device->numa_node);
4923
4924                         if (!ethdev->data->dev_private) {
4925                                 RTE_ETHDEV_LOG(ERR,
4926                                         "failed to allocate private data\n");
4927                                 retval = -ENOMEM;
4928                                 goto probe_failed;
4929                         }
4930                 }
4931         } else {
4932                 ethdev = rte_eth_dev_attach_secondary(name);
4933                 if (!ethdev) {
4934                         RTE_ETHDEV_LOG(ERR,
4935                                 "secondary process attach failed, ethdev doesn't exist\n");
4936                         return -ENODEV;
4937                 }
4938         }
4939
4940         ethdev->device = device;
4941
4942         if (ethdev_bus_specific_init) {
4943                 retval = ethdev_bus_specific_init(ethdev, bus_init_params);
4944                 if (retval) {
4945                         RTE_ETHDEV_LOG(ERR,
4946                                 "ethdev bus specific initialisation failed\n");
4947                         goto probe_failed;
4948                 }
4949         }
4950
4951         retval = ethdev_init(ethdev, init_params);
4952         if (retval) {
4953                 RTE_ETHDEV_LOG(ERR, "ethdev initialisation failed\n");
4954                 goto probe_failed;
4955         }
4956
4957         rte_eth_dev_probing_finish(ethdev);
4958
4959         return retval;
4960
4961 probe_failed:
4962         rte_eth_dev_release_port(ethdev);
4963         return retval;
4964 }
4965
4966 int
4967 rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
4968         ethdev_uninit_t ethdev_uninit)
4969 {
4970         int ret;
4971
4972         ethdev = rte_eth_dev_allocated(ethdev->data->name);
4973         if (!ethdev)
4974                 return -ENODEV;
4975
4976         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL);
4977
4978         ret = ethdev_uninit(ethdev);
4979         if (ret)
4980                 return ret;
4981
4982         return rte_eth_dev_release_port(ethdev);
4983 }
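
/*
 * Editor's note: hedged sketch of how a PCI probe/remove pair usually
 * drives these helpers; "struct my_priv", "my_ethdev_init" and
 * "my_ethdev_uninit" are hypothetical PMD symbols.
 *
 *	ret = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
 *			sizeof(struct my_priv), NULL, NULL,
 *			my_ethdev_init, NULL);
 *
 *	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
 *	if (ethdev != NULL)
 *		ret = rte_eth_dev_destroy(ethdev, my_ethdev_uninit);
 */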
4984
4985 int
4986 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
4987                           int epfd, int op, void *data)
4988 {
4989         uint32_t vec;
4990         struct rte_eth_dev *dev;
4991         struct rte_intr_handle *intr_handle;
4992         int rc;
4993
4994         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4995         dev = &rte_eth_devices[port_id];
4996
4997         if (queue_id >= dev->data->nb_rx_queues) {
4998                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4999                 return -EINVAL;
5000         }
5001
5002         if (!dev->intr_handle) {
5003                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
5004                 return -ENOTSUP;
5005         }
5006
5007         intr_handle = dev->intr_handle;
5008         if (!intr_handle->intr_vec) {
5009                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
5010                 return -EPERM;
5011         }
5012
5013         vec = intr_handle->intr_vec[queue_id];
5014         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
5015         if (rc && rc != -EEXIST) {
5016                 RTE_ETHDEV_LOG(ERR,
5017                         "p %u q %u rx ctl error op %d epfd %d vec %u\n",
5018                         port_id, queue_id, op, epfd, vec);
5019                 return rc;
5020         }
5021
5022         return 0;
5023 }
5024
5025 int
5026 rte_eth_dev_rx_intr_enable(uint16_t port_id,
5027                            uint16_t queue_id)
5028 {
5029         struct rte_eth_dev *dev;
5030         int ret;
5031
5032         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5033         dev = &rte_eth_devices[port_id];
5034
5035         ret = eth_dev_validate_rx_queue(dev, queue_id);
5036         if (ret != 0)
5037                 return ret;
5038
5039         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
5040         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id));
5041 }
5042
5043 int
5044 rte_eth_dev_rx_intr_disable(uint16_t port_id,
5045                             uint16_t queue_id)
5046 {
5047         struct rte_eth_dev *dev;
5048         int ret;
5049
5050         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5051         dev = &rte_eth_devices[port_id];
5052
5053         ret = eth_dev_validate_rx_queue(dev, queue_id);
5054         if (ret != 0)
5055                 return ret;
5056
5057         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
5058         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id));
5059 }
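
/*
 * Editor's note: sketch of the event-driven Rx loop these helpers enable,
 * assuming the port was configured with intr_conf.rxq = 1.
 *
 *	struct rte_epoll_event ev;
 *
 *	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
 *			RTE_INTR_EVENT_ADD, NULL);
 *	rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *	if (rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1) > 0) {
 *		rte_eth_dev_rx_intr_disable(port_id, queue_id);
 *		(drain the queue with rte_eth_rx_burst() here)
 *	}
 */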
5060
5061
5062 const struct rte_eth_rxtx_callback *
5063 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
5064                 rte_rx_callback_fn fn, void *user_param)
5065 {
5066 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5067         rte_errno = ENOTSUP;
5068         return NULL;
5069 #endif
5070         struct rte_eth_dev *dev;
5071
5072         /* check input parameters */
5073         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
5074                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
5075                 rte_errno = EINVAL;
5076                 return NULL;
5077         }
5078         dev = &rte_eth_devices[port_id];
5079         if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
5080                 rte_errno = EINVAL;
5081                 return NULL;
5082         }
5083         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
5084
5085         if (cb == NULL) {
5086                 rte_errno = ENOMEM;
5087                 return NULL;
5088         }
5089
5090         cb->fn.rx = fn;
5091         cb->param = user_param;
5092
5093         rte_spinlock_lock(&eth_dev_rx_cb_lock);
5094         /* Add the callbacks in FIFO order. */
5095         struct rte_eth_rxtx_callback *tail =
5096                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
5097
5098         if (!tail) {
5099                 /* Stores to cb->fn and cb->param should complete before
5100                  * cb is visible to data plane.
5101                  */
5102                 __atomic_store_n(
5103                         &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
5104                         cb, __ATOMIC_RELEASE);
5105
5106         } else {
5107                 while (tail->next)
5108                         tail = tail->next;
5109                 /* Stores to cb->fn and cb->param should complete before
5110                  * cb is visible to data plane.
5111                  */
5112                 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
5113         }
5114         rte_spinlock_unlock(&eth_dev_rx_cb_lock);
5115
5116         return cb;
5117 }
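
/*
 * Editor's note: sketch of a post-Rx callback counting packets; "total"
 * is a hypothetical application counter.
 *
 *	static uint16_t
 *	count_cb(uint16_t port, uint16_t queue, struct rte_mbuf *pkts[],
 *			uint16_t nb_pkts, uint16_t max_pkts, void *arg)
 *	{
 *		RTE_SET_USED(port);
 *		RTE_SET_USED(queue);
 *		RTE_SET_USED(pkts);
 *		RTE_SET_USED(max_pkts);
 *		*(uint64_t *)arg += nb_pkts;
 *		return nb_pkts;
 *	}
 *
 *	static uint64_t total;
 *	const struct rte_eth_rxtx_callback *cb =
 *		rte_eth_add_rx_callback(port_id, 0, count_cb, &total);
 */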
5118
5119 const struct rte_eth_rxtx_callback *
5120 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
5121                 rte_rx_callback_fn fn, void *user_param)
5122 {
5123 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5124         rte_errno = ENOTSUP;
5125         return NULL;
5126 #endif
5127         /* check input parameters */
5128         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
5129                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
5130                 rte_errno = EINVAL;
5131                 return NULL;
5132         }
5133
5134         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
5135
5136         if (cb == NULL) {
5137                 rte_errno = ENOMEM;
5138                 return NULL;
5139         }
5140
5141         cb->fn.rx = fn;
5142         cb->param = user_param;
5143
5144         rte_spinlock_lock(&eth_dev_rx_cb_lock);
5145         /* Add the callback at the first position */
5146         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
5147         /* Stores to cb->fn, cb->param and cb->next should complete before
5148          * cb is visible to data plane threads.
5149          */
5150         __atomic_store_n(
5151                 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
5152                 cb, __ATOMIC_RELEASE);
5153         rte_spinlock_unlock(&eth_dev_rx_cb_lock);
5154
5155         return cb;
5156 }
5157
5158 const struct rte_eth_rxtx_callback *
5159 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
5160                 rte_tx_callback_fn fn, void *user_param)
5161 {
5162 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5163         rte_errno = ENOTSUP;
5164         return NULL;
5165 #endif
5166         struct rte_eth_dev *dev;
5167
5168         /* check input parameters */
5169         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
5170                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
5171                 rte_errno = EINVAL;
5172                 return NULL;
5173         }
5174
5175         dev = &rte_eth_devices[port_id];
5176         if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
5177                 rte_errno = EINVAL;
5178                 return NULL;
5179         }
5180
5181         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
5182
5183         if (cb == NULL) {
5184                 rte_errno = ENOMEM;
5185                 return NULL;
5186         }
5187
5188         cb->fn.tx = fn;
5189         cb->param = user_param;
5190
5191         rte_spinlock_lock(&eth_dev_tx_cb_lock);
5192         /* Add the callbacks in FIFO order. */
5193         struct rte_eth_rxtx_callback *tail =
5194                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
5195
5196         if (!tail) {
5197                 /* Stores to cb->fn and cb->param should complete before
5198                  * cb is visible to data plane.
5199                  */
5200                 __atomic_store_n(
5201                         &rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id],
5202                         cb, __ATOMIC_RELEASE);
5203
5204         } else {
5205                 while (tail->next)
5206                         tail = tail->next;
5207                 /* Stores to cb->fn and cb->param should complete before
5208                  * cb is visible to data plane.
5209                  */
5210                 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
5211         }
5212         rte_spinlock_unlock(&eth_dev_tx_cb_lock);
5213
5214         return cb;
5215 }
5216
5217 int
5218 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
5219                 const struct rte_eth_rxtx_callback *user_cb)
5220 {
5221 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5222         return -ENOTSUP;
5223 #endif
5224         /* Check input parameters. */
5225         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5226         if (user_cb == NULL ||
5227                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
5228                 return -EINVAL;
5229
5230         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
5231         struct rte_eth_rxtx_callback *cb;
5232         struct rte_eth_rxtx_callback **prev_cb;
5233         int ret = -EINVAL;
5234
5235         rte_spinlock_lock(&eth_dev_rx_cb_lock);
5236         prev_cb = &dev->post_rx_burst_cbs[queue_id];
5237         for (; *prev_cb != NULL; prev_cb = &cb->next) {
5238                 cb = *prev_cb;
5239                 if (cb == user_cb) {
5240                         /* Remove the user cb from the callback list. */
5241                         __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
5242                         ret = 0;
5243                         break;
5244                 }
5245         }
5246         rte_spinlock_unlock(&eth_dev_rx_cb_lock);
5247
5248         return ret;
5249 }
5250
5251 int
5252 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
5253                 const struct rte_eth_rxtx_callback *user_cb)
5254 {
5255 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5256         return -ENOTSUP;
5257 #endif
5258         /* Check input parameters. */
5259         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5260         if (user_cb == NULL ||
5261                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
5262                 return -EINVAL;
5263
5264         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
5265         int ret = -EINVAL;
5266         struct rte_eth_rxtx_callback *cb;
5267         struct rte_eth_rxtx_callback **prev_cb;
5268
5269         rte_spinlock_lock(&eth_dev_tx_cb_lock);
5270         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
5271         for (; *prev_cb != NULL; prev_cb = &cb->next) {
5272                 cb = *prev_cb;
5273                 if (cb == user_cb) {
5274                         /* Remove the user cb from the callback list. */
5275                         __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
5276                         ret = 0;
5277                         break;
5278                 }
5279         }
5280         rte_spinlock_unlock(&eth_dev_tx_cb_lock);
5281
5282         return ret;
5283 }
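
/*
 * Editor's note: removal only unlinks the callback, it does not free it.
 * A sketch of teardown, assuming no burst can still be running on the
 * queue when rte_free() is reached:
 *
 *	if (rte_eth_remove_rx_callback(port_id, 0, cb) == 0)
 *		rte_free((void *)(uintptr_t)cb);
 */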
5284
5285 int
5286 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5287         struct rte_eth_rxq_info *qinfo)
5288 {
5289         struct rte_eth_dev *dev;
5290
5291         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5292         dev = &rte_eth_devices[port_id];
5293
5294         if (queue_id >= dev->data->nb_rx_queues) {
5295                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
5296                 return -EINVAL;
5297         }
5298
5299         if (qinfo == NULL) {
5300                 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Rx queue %u info to NULL\n",
5301                         port_id, queue_id);
5302                 return -EINVAL;
5303         }
5304
5305         if (dev->data->rx_queues == NULL ||
5306                         dev->data->rx_queues[queue_id] == NULL) {
5307                 RTE_ETHDEV_LOG(ERR,
5308                                "Rx queue %"PRIu16" of device with port_id=%"
5309                                PRIu16" has not been setup\n",
5310                                queue_id, port_id);
5311                 return -EINVAL;
5312         }
5313
5314         if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
5315                 RTE_ETHDEV_LOG(INFO,
5316                         "Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
5317                         queue_id, port_id);
5318                 return -EINVAL;
5319         }
5320
5321         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
5322
5323         memset(qinfo, 0, sizeof(*qinfo));
5324         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
5325         qinfo->queue_state = dev->data->rx_queue_state[queue_id];
5326
5327         return 0;
5328 }
5329
5330 int
5331 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5332         struct rte_eth_txq_info *qinfo)
5333 {
5334         struct rte_eth_dev *dev;
5335
5336         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5337         dev = &rte_eth_devices[port_id];
5338
5339         if (queue_id >= dev->data->nb_tx_queues) {
5340                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
5341                 return -EINVAL;
5342         }
5343
5344         if (qinfo == NULL) {
5345                 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Tx queue %u info to NULL\n",
5346                         port_id, queue_id);
5347                 return -EINVAL;
5348         }
5349
5350         if (dev->data->tx_queues == NULL ||
5351                         dev->data->tx_queues[queue_id] == NULL) {
5352                 RTE_ETHDEV_LOG(ERR,
5353                                "Tx queue %"PRIu16" of device with port_id=%"
5354                                PRIu16" has not been setup\n",
5355                                queue_id, port_id);
5356                 return -EINVAL;
5357         }
5358
5359         if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
5360                 RTE_ETHDEV_LOG(INFO,
5361                         "Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
5362                         queue_id, port_id);
5363                 return -EINVAL;
5364         }
5365
5366         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
5367
5368         memset(qinfo, 0, sizeof(*qinfo));
5369         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
5370         qinfo->queue_state = dev->data->tx_queue_state[queue_id];
5371
5372         return 0;
5373 }
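
/*
 * Editor's note: small sketch reading back the ring sizes chosen at
 * queue-setup time.
 *
 *	struct rte_eth_rxq_info rxq;
 *	struct rte_eth_txq_info txq;
 *
 *	if (rte_eth_rx_queue_info_get(port_id, 0, &rxq) == 0 &&
 *			rte_eth_tx_queue_info_get(port_id, 0, &txq) == 0)
 *		printf("ring sizes: rx %u tx %u\n", rxq.nb_desc, txq.nb_desc);
 */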
5374
5375 int
5376 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5377                           struct rte_eth_burst_mode *mode)
5378 {
5379         struct rte_eth_dev *dev;
5380
5381         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5382         dev = &rte_eth_devices[port_id];
5383
5384         if (queue_id >= dev->data->nb_rx_queues) {
5385                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
5386                 return -EINVAL;
5387         }
5388
5389         if (mode == NULL) {
5390                 RTE_ETHDEV_LOG(ERR,
5391                         "Cannot get ethdev port %u Rx queue %u burst mode to NULL\n",
5392                         port_id, queue_id);
5393                 return -EINVAL;
5394         }
5395
5396         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP);
5397         memset(mode, 0, sizeof(*mode));
5398         return eth_err(port_id,
5399                        dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode));
5400 }
5401
5402 int
5403 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5404                           struct rte_eth_burst_mode *mode)
5405 {
5406         struct rte_eth_dev *dev;
5407
5408         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5409         dev = &rte_eth_devices[port_id];
5410
5411         if (queue_id >= dev->data->nb_tx_queues) {
5412                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
5413                 return -EINVAL;
5414         }
5415
5416         if (mode == NULL) {
5417                 RTE_ETHDEV_LOG(ERR,
5418                         "Cannot get ethdev port %u Tx queue %u burst mode to NULL\n",
5419                         port_id, queue_id);
5420                 return -EINVAL;
5421         }
5422
5423         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP);
5424         memset(mode, 0, sizeof(*mode));
5425         return eth_err(port_id,
5426                        dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode));
5427 }
5428
5429 int
5430 rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id,
5431                 struct rte_power_monitor_cond *pmc)
5432 {
5433         struct rte_eth_dev *dev;
5434
5435         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5436         dev = &rte_eth_devices[port_id];
5437
5438         if (queue_id >= dev->data->nb_rx_queues) {
5439                 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
5440                 return -EINVAL;
5441         }
5442
5443         if (pmc == NULL) {
5444                 RTE_ETHDEV_LOG(ERR,
5445                         "Cannot get ethdev port %u Rx queue %u power monitor condition to NULL\n",
5446                         port_id, queue_id);
5447                 return -EINVAL;
5448         }
5449
5450         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_monitor_addr, -ENOTSUP);
5451         return eth_err(port_id,
5452                 dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id], pmc));
5453 }
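
/*
 * Editor's note: hedged sketch pairing this with the power-management
 * intrinsics; "timeout_cycles" is a hypothetical TSC delta after which
 * rte_power_monitor() gives up waiting for the next descriptor write.
 *
 *	struct rte_power_monitor_cond pmc;
 *
 *	if (rte_eth_get_monitor_addr(port_id, 0, &pmc) == 0)
 *		rte_power_monitor(&pmc, rte_rdtsc() + timeout_cycles);
 */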
5454
5455 int
5456 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
5457                              struct rte_ether_addr *mc_addr_set,
5458                              uint32_t nb_mc_addr)
5459 {
5460         struct rte_eth_dev *dev;
5461
5462         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5463         dev = &rte_eth_devices[port_id];
5464
5465         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
5466         return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
5467                                                 mc_addr_set, nb_mc_addr));
5468 }
5469
5470 int
5471 rte_eth_timesync_enable(uint16_t port_id)
5472 {
5473         struct rte_eth_dev *dev;
5474
5475         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5476         dev = &rte_eth_devices[port_id];
5477
5478         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
5479         return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
5480 }
5481
5482 int
5483 rte_eth_timesync_disable(uint16_t port_id)
5484 {
5485         struct rte_eth_dev *dev;
5486
5487         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5488         dev = &rte_eth_devices[port_id];
5489
5490         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
5491         return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
5492 }
5493
5494 int
5495 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
5496                                    uint32_t flags)
5497 {
5498         struct rte_eth_dev *dev;
5499
5500         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5501         dev = &rte_eth_devices[port_id];
5502
5503         if (timestamp == NULL) {
5504                 RTE_ETHDEV_LOG(ERR,
5505                         "Cannot read ethdev port %u Rx timestamp to NULL\n",
5506                         port_id);
5507                 return -EINVAL;
5508         }
5509
5510         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
5511         return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
5512                                 (dev, timestamp, flags));
5513 }
5514
5515 int
5516 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
5517                                    struct timespec *timestamp)
5518 {
5519         struct rte_eth_dev *dev;
5520
5521         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5522         dev = &rte_eth_devices[port_id];
5523
5524         if (timestamp == NULL) {
5525                 RTE_ETHDEV_LOG(ERR,
5526                         "Cannot read ethdev port %u Tx timestamp to NULL\n",
5527                         port_id);
5528                 return -EINVAL;
5529         }
5530
5531         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
5532         return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
5533                                 (dev, timestamp));
5534 }
5535
5536 int
5537 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
5538 {
5539         struct rte_eth_dev *dev;
5540
5541         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5542         dev = &rte_eth_devices[port_id];
5543
5544         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
5545         return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, delta));
5546 }
5547
5548 int
5549 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
5550 {
5551         struct rte_eth_dev *dev;
5552
5553         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5554         dev = &rte_eth_devices[port_id];
5555
5556         if (timestamp == NULL) {
5557                 RTE_ETHDEV_LOG(ERR,
5558                         "Cannot read ethdev port %u timesync time to NULL\n",
5559                         port_id);
5560                 return -EINVAL;
5561         }
5562
5563         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
5564         return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
5565                                                                 timestamp));
5566 }
5567
5568 int
5569 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
5570 {
5571         struct rte_eth_dev *dev;
5572
5573         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5574         dev = &rte_eth_devices[port_id];
5575
5576         if (timestamp == NULL) {
5577                 RTE_ETHDEV_LOG(ERR,
5578                         "Cannot write ethdev port %u timesync from NULL time\n",
5579                         port_id);
5580                 return -EINVAL;
5581         }
5582
5583         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
5584         return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
5585                                                                 timestamp));
5586 }
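
/*
 * Editor's note: sketch of the IEEE 1588 helpers above in their usual
 * order; "compute_offset" stands in for application PTP servo logic, and
 * the flags argument selects a timestamp register on NICs with several.
 *
 *	struct timespec ts;
 *
 *	rte_eth_timesync_enable(port_id);
 *	if (rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0) == 0)
 *		rte_eth_timesync_adjust_time(port_id, compute_offset(&ts));
 *	rte_eth_timesync_disable(port_id);
 */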
5587
5588 int
5589 rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
5590 {
5591         struct rte_eth_dev *dev;
5592
5593         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5594         dev = &rte_eth_devices[port_id];
5595
5596         if (clock == NULL) {
5597                 RTE_ETHDEV_LOG(ERR, "Cannot read ethdev port %u clock to NULL\n",
5598                         port_id);
5599                 return -EINVAL;
5600         }
5601
5602         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP);
5603         return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock));
5604 }
5605
5606 int
5607 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
5608 {
5609         struct rte_eth_dev *dev;
5610
5611         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5612         dev = &rte_eth_devices[port_id];
5613
5614         if (info == NULL) {
5615                 RTE_ETHDEV_LOG(ERR,
5616                         "Cannot get ethdev port %u register info to NULL\n",
5617                         port_id);
5618                 return -EINVAL;
5619         }
5620
5621         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
5622         return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
5623 }
5624
5625 int
5626 rte_eth_dev_get_eeprom_length(uint16_t port_id)
5627 {
5628         struct rte_eth_dev *dev;
5629
5630         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5631         dev = &rte_eth_devices[port_id];
5632
5633         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
5634         return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
5635 }
5636
5637 int
5638 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
5639 {
5640         struct rte_eth_dev *dev;
5641
5642         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5643         dev = &rte_eth_devices[port_id];
5644
5645         if (info == NULL) {
5646                 RTE_ETHDEV_LOG(ERR,
5647                         "Cannot get ethdev port %u EEPROM info to NULL\n",
5648                         port_id);
5649                 return -EINVAL;
5650         }
5651
5652         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
5653         return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
5654 }
5655
5656 int
5657 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
5658 {
5659         struct rte_eth_dev *dev;
5660
5661         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5662         dev = &rte_eth_devices[port_id];
5663
5664         if (info == NULL) {
5665                 RTE_ETHDEV_LOG(ERR,
5666                         "Cannot set ethdev port %u EEPROM from NULL info\n",
5667                         port_id);
5668                 return -EINVAL;
5669         }
5670
5671         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
5672         return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
5673 }
5674
5675 int
5676 rte_eth_dev_get_module_info(uint16_t port_id,
5677                             struct rte_eth_dev_module_info *modinfo)
5678 {
5679         struct rte_eth_dev *dev;
5680
5681         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5682         dev = &rte_eth_devices[port_id];
5683
5684         if (modinfo == NULL) {
5685                 RTE_ETHDEV_LOG(ERR,
5686                         "Cannot get ethdev port %u EEPROM module info to NULL\n",
5687                         port_id);
5688                 return -EINVAL;
5689         }
5690
5691         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP);
5692         return (*dev->dev_ops->get_module_info)(dev, modinfo);
5693 }
5694
5695 int
5696 rte_eth_dev_get_module_eeprom(uint16_t port_id,
5697                               struct rte_dev_eeprom_info *info)
5698 {
5699         struct rte_eth_dev *dev;
5700
5701         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5702         dev = &rte_eth_devices[port_id];
5703
5704         if (info == NULL) {
5705                 RTE_ETHDEV_LOG(ERR,
5706                         "Cannot get ethdev port %u module EEPROM info to NULL\n",
5707                         port_id);
5708                 return -EINVAL;
5709         }
5710
5711         if (info->data == NULL) {
5712                 RTE_ETHDEV_LOG(ERR,
5713                         "Cannot get ethdev port %u module EEPROM data to NULL\n",
5714                         port_id);
5715                 return -EINVAL;
5716         }
5717
5718         if (info->length == 0) {
5719                 RTE_ETHDEV_LOG(ERR,
5720                         "Cannot get ethdev port %u module EEPROM into a zero-length data buffer\n",
5721                         port_id);
5722                 return -EINVAL;
5723         }
5724
5725         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP);
5726         return (*dev->dev_ops->get_module_eeprom)(dev, info);
5727 }
5728
5729 int
5730 rte_eth_dev_get_dcb_info(uint16_t port_id,
5731                              struct rte_eth_dcb_info *dcb_info)
5732 {
5733         struct rte_eth_dev *dev;
5734
5735         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5736         dev = &rte_eth_devices[port_id];
5737
5738         if (dcb_info == NULL) {
5739                 RTE_ETHDEV_LOG(ERR,
5740                         "Cannot get ethdev port %u DCB info to NULL\n",
5741                         port_id);
5742                 return -EINVAL;
5743         }
5744
5745         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
5746
5747         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
5748         return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
5749 }
5750
5751 static void
5752 eth_dev_adjust_nb_desc(uint16_t *nb_desc,
5753                 const struct rte_eth_desc_lim *desc_lim)
5754 {
5755         if (desc_lim->nb_align != 0)
5756                 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
5757
5758         if (desc_lim->nb_max != 0)
5759                 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
5760
5761         *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
5762 }
5763
5764 int
5765 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
5766                                  uint16_t *nb_rx_desc,
5767                                  uint16_t *nb_tx_desc)
5768 {
5769         struct rte_eth_dev_info dev_info;
5770         int ret;
5771
5772         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5773
5774         ret = rte_eth_dev_info_get(port_id, &dev_info);
5775         if (ret != 0)
5776                 return ret;
5777
5778         if (nb_rx_desc != NULL)
5779                 eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
5780
5781         if (nb_tx_desc != NULL)
5782                 eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
5783
5784         return 0;
5785 }
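
/*
 * Editor's note: typical use right before queue setup, letting the driver
 * align and clamp the requested ring sizes; "mb_pool" is a hypothetical
 * mbuf mempool.
 *
 *	uint16_t nb_rxd = 1024, nb_txd = 1024;
 *
 *	if (rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd) == 0)
 *		(void)rte_eth_rx_queue_setup(port_id, 0, nb_rxd,
 *				rte_eth_dev_socket_id(port_id), NULL, mb_pool);
 */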
5786
5787 int
5788 rte_eth_dev_hairpin_capability_get(uint16_t port_id,
5789                                    struct rte_eth_hairpin_cap *cap)
5790 {
5791         struct rte_eth_dev *dev;
5792
5793         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5794         dev = &rte_eth_devices[port_id];
5795
5796         if (cap == NULL) {
5797                 RTE_ETHDEV_LOG(ERR,
5798                         "Cannot get ethdev port %u hairpin capability to NULL\n",
5799                         port_id);
5800                 return -EINVAL;
5801         }
5802
5803         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP);
5804         memset(cap, 0, sizeof(*cap));
5805         return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap));
5806 }
5807
5808 int
5809 rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
5810 {
5811         if (dev->data->rx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN)
5812                 return 1;
5813         return 0;
5814 }
5815
5816 int
5817 rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
5818 {
5819         if (dev->data->tx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN)
5820                 return 1;
5821         return 0;
5822 }
5823
5824 int
5825 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
5826 {
5827         struct rte_eth_dev *dev;
5828
5829         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5830         dev = &rte_eth_devices[port_id];
5831
5832         if (pool == NULL) {
5833                 RTE_ETHDEV_LOG(ERR,
5834                         "Cannot test ethdev port %u mempool operation from NULL pool\n",
5835                         port_id);
5836                 return -EINVAL;
5837         }
5838
5839         if (*dev->dev_ops->pool_ops_supported == NULL)
5840                 return 1; /* all pools are supported */
5841
5842         return (*dev->dev_ops->pool_ops_supported)(dev, pool);
5843 }
5844
5845 /**
5846  * A set of values to describe the possible states of a switch domain.
5847  */
5848 enum rte_eth_switch_domain_state {
5849         RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
5850         RTE_ETH_SWITCH_DOMAIN_ALLOCATED
5851 };
5852
5853 /**
5854  * Array of switch domains available for allocation. Array is sized to
5855  * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
5856  * ethdev ports in a single process.
5857  */
5858 static struct rte_eth_dev_switch {
5859         enum rte_eth_switch_domain_state state;
5860 } eth_dev_switch_domains[RTE_MAX_ETHPORTS];
5861
5862 int
5863 rte_eth_switch_domain_alloc(uint16_t *domain_id)
5864 {
5865         uint16_t i;
5866
5867         *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
5868
5869         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
5870                 if (eth_dev_switch_domains[i].state ==
5871                         RTE_ETH_SWITCH_DOMAIN_UNUSED) {
5872                         eth_dev_switch_domains[i].state =
5873                                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
5874                         *domain_id = i;
5875                         return 0;
5876                 }
5877         }
5878
5879         return -ENOSPC;
5880 }
5881
5882 int
5883 rte_eth_switch_domain_free(uint16_t domain_id)
5884 {
5885         if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
5886                 domain_id >= RTE_MAX_ETHPORTS)
5887                 return -EINVAL;
5888
5889         if (eth_dev_switch_domains[domain_id].state !=
5890                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
5891                 return -EINVAL;
5892
5893         eth_dev_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;
5894
5895         return 0;
5896 }
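
/*
 * Editor's note: sketch of the allocation pattern a PMD follows when all
 * of its representors must share one switch domain.
 *
 *	uint16_t domain_id;
 *
 *	if (rte_eth_switch_domain_alloc(&domain_id) != 0)
 *		return -ENOSPC;
 *	(store domain_id in each representor's switch_info, and on close)
 *	rte_eth_switch_domain_free(domain_id);
 */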
5897
5898 static int
5899 eth_dev_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
5900 {
5901         int state;
5902         struct rte_kvargs_pair *pair;
5903         char *letter;
5904
5905         arglist->str = strdup(str_in);
5906         if (arglist->str == NULL)
5907                 return -ENOMEM;
5908
5909         letter = arglist->str;
5910         state = 0;
5911         arglist->count = 0;
5912         pair = &arglist->pairs[0];
5913         while (1) {
5914                 switch (state) {
5915                 case 0: /* Initial */
5916                         if (*letter == '=')
5917                                 return -EINVAL;
5918                         else if (*letter == '\0')
5919                                 return 0;
5920
5921                         state = 1;
5922                         pair->key = letter;
5923                         /* fall-thru */
5924
5925                 case 1: /* Parsing key */
5926                         if (*letter == '=') {
5927                                 *letter = '\0';
5928                                 pair->value = letter + 1;
5929                                 state = 2;
5930                         } else if (*letter == ',' || *letter == '\0')
5931                                 return -EINVAL;
5932                         break;
5933
5934
5935                 case 2: /* Parsing value */
5936                         if (*letter == '[')
5937                                 state = 3;
5938                         else if (*letter == ',') {
5939                                 *letter = '\0';
5940                                 arglist->count++;
5941                                 pair = &arglist->pairs[arglist->count];
5942                                 state = 0;
5943                         } else if (*letter == '\0') {
5944                                 letter--;
5945                                 arglist->count++;
5946                                 pair = &arglist->pairs[arglist->count];
5947                                 state = 0;
5948                         }
5949                         break;
5950
5951                 case 3: /* Parsing list */
5952                         if (*letter == ']')
5953                                 state = 2;
5954                         else if (*letter == '\0')
5955                                 return -EINVAL;
5956                         break;
5957                 }
5958                 letter++;
5959         }
5960 }
5961
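/*
 * Illustrative sketch (not part of the library): what the tokeniser
 * above produces for a typical devargs string. The bracketed value is
 * kept as one token, so the comma inside it does not split the pair:
 *
 *   input: "representor=[0,2-4],foo=bar"
 *   pairs: { "representor", "[0,2-4]" }, { "foo", "bar" }, count == 2
 */
static __rte_unused void
example_devargs_tokenise(void)
{
	struct rte_kvargs args;

	if (eth_dev_devargs_tokenise(&args, "representor=[0,2-4],foo=bar") == 0) {
		/* args.pairs[0].key/.value point into args.str ... */
	}
	free(args.str); /* ... so one free releases every token (NULL-safe) */
}
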
int
rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
{
	struct rte_kvargs args;
	struct rte_kvargs_pair *pair;
	unsigned int i;
	int result = 0;

	memset(eth_da, 0, sizeof(*eth_da));

	result = eth_dev_devargs_tokenise(&args, dargs);
	if (result < 0)
		goto parse_cleanup;

	for (i = 0; i < args.count; i++) {
		pair = &args.pairs[i];
		if (strcmp("representor", pair->key) == 0) {
			if (eth_da->type != RTE_ETH_REPRESENTOR_NONE) {
				RTE_ETHDEV_LOG(ERR, "duplicated representor key: %s\n",
					dargs);
				result = -1;
				goto parse_cleanup;
			}
			result = rte_eth_devargs_parse_representor_ports(
					pair->value, eth_da);
			if (result < 0)
				goto parse_cleanup;
		}
	}

parse_cleanup:
	free(args.str);

	return result;
}

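/*
 * Illustrative sketch (not part of the library): parsing a "representor"
 * devargs value. Assuming the usual range expansion, "[0,2-4]" is
 * expected to yield ports 0, 2, 3 and 4 in the rte_eth_devargs fields
 * defined in ethdev_driver.h.
 */
static __rte_unused int
example_devargs_parse(void)
{
	struct rte_eth_devargs da;
	int ret;

	ret = rte_eth_devargs_parse("representor=[0,2-4]", &da);
	if (ret < 0)
		return ret;

	/* Expected: da.nb_representor_ports == 4 */
	return 0;
}
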
int
rte_eth_representor_id_get(const struct rte_eth_dev *ethdev,
			   enum rte_eth_representor_type type,
			   int controller, int pf, int representor_port,
			   uint16_t *repr_id)
{
	int ret, n, i, count;
	struct rte_eth_representor_info *info = NULL;
	size_t size;

	if (type == RTE_ETH_REPRESENTOR_NONE)
		return 0;
	if (repr_id == NULL)
		return -EINVAL;

	/* Get PMD representor range info. */
	ret = rte_eth_representor_info_get(ethdev->data->port_id, NULL);
	if (ret == -ENOTSUP && type == RTE_ETH_REPRESENTOR_VF &&
	    controller == -1 && pf == -1) {
		/* Direct mapping for legacy VF representor. */
		*repr_id = representor_port;
		return 0;
	} else if (ret < 0) {
		return ret;
	}
	n = ret;
	size = sizeof(*info) + n * sizeof(info->ranges[0]);
	info = calloc(1, size);
	if (info == NULL)
		return -ENOMEM;
	ret = rte_eth_representor_info_get(ethdev->data->port_id, info);
	if (ret < 0)
		goto out;

	/* Default controller and pf to caller. */
	if (controller == -1)
		controller = info->controller;
	if (pf == -1)
		pf = info->pf;

	/* Locate representor ID. */
	ret = -ENOENT;
	for (i = 0; i < n; ++i) {
		if (info->ranges[i].type != type)
			continue;
		if (info->ranges[i].controller != controller)
			continue;
		if (info->ranges[i].id_end < info->ranges[i].id_base) {
			RTE_ETHDEV_LOG(WARNING,
				"Port %hu invalid representor ID range %u - %u, entry %d\n",
				ethdev->data->port_id, info->ranges[i].id_base,
				info->ranges[i].id_end, i);
			continue;
		}
		count = info->ranges[i].id_end - info->ranges[i].id_base + 1;
		switch (info->ranges[i].type) {
		case RTE_ETH_REPRESENTOR_PF:
			if (pf < info->ranges[i].pf ||
			    pf >= info->ranges[i].pf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
				   (pf - info->ranges[i].pf);
			ret = 0;
			goto out;
		case RTE_ETH_REPRESENTOR_VF:
			if (info->ranges[i].pf != pf)
				continue;
			if (representor_port < info->ranges[i].vf ||
			    representor_port >= info->ranges[i].vf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
				   (representor_port - info->ranges[i].vf);
			ret = 0;
			goto out;
		case RTE_ETH_REPRESENTOR_SF:
			if (info->ranges[i].pf != pf)
				continue;
			if (representor_port < info->ranges[i].sf ||
			    representor_port >= info->ranges[i].sf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
				   (representor_port - info->ranges[i].sf);
			ret = 0;
			goto out;
		default:
			break;
		}
	}
out:
	free(info);
	return ret;
}

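/*
 * Illustrative sketch (not part of the library): resolving the ID of the
 * representor for VF 2, letting -1 select the controller and PF defaults
 * reported by the driver.
 */
static __rte_unused int
example_representor_id(const struct rte_eth_dev *ethdev)
{
	uint16_t repr_id;
	int ret;

	ret = rte_eth_representor_id_get(ethdev, RTE_ETH_REPRESENTOR_VF,
					 -1, -1, 2, &repr_id);
	if (ret == 0)
		RTE_ETHDEV_LOG(DEBUG, "VF 2 maps to representor ID %u\n",
			       repr_id);
	return ret;
}
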
static int
eth_dev_handle_port_list(const char *cmd __rte_unused,
		const char *params __rte_unused,
		struct rte_tel_data *d)
{
	int port_id;

	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
	RTE_ETH_FOREACH_DEV(port_id)
		rte_tel_data_add_array_int(d, port_id);
	return 0;
}

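/*
 * For reference: a telemetry client asking for "/ethdev/list" receives the
 * array as JSON, e.g. {"/ethdev/list": [0, 1]}; the framing is added by
 * the telemetry library, not by the callback above.
 */
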
static void
eth_dev_add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats,
		const char *stat_name)
{
	int q;
	struct rte_tel_data *q_data = rte_tel_data_alloc();

	if (q_data == NULL)
		return; /* skip the queue stat rather than dereference NULL */
	rte_tel_data_start_array(q_data, RTE_TEL_U64_VAL);
	for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++)
		rte_tel_data_add_array_u64(q_data, q_stats[q]);
	rte_tel_data_add_dict_container(d, stat_name, q_data, 0);
}

#define ADD_DICT_STAT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s)

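/*
 * For reference: ADD_DICT_STAT(stats, ipackets) expands to
 * rte_tel_data_add_dict_u64(d, "ipackets", stats.ipackets) -- the
 * stringified member name doubles as the dictionary key.
 */
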
static int
eth_dev_handle_port_stats(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	struct rte_eth_stats stats;
	int port_id, ret;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	port_id = atoi(params);
	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	ret = rte_eth_stats_get(port_id, &stats);
	if (ret < 0)
		return -1;

	rte_tel_data_start_dict(d);
	ADD_DICT_STAT(stats, ipackets);
	ADD_DICT_STAT(stats, opackets);
	ADD_DICT_STAT(stats, ibytes);
	ADD_DICT_STAT(stats, obytes);
	ADD_DICT_STAT(stats, imissed);
	ADD_DICT_STAT(stats, ierrors);
	ADD_DICT_STAT(stats, oerrors);
	ADD_DICT_STAT(stats, rx_nombuf);
	eth_dev_add_port_queue_stats(d, stats.q_ipackets, "q_ipackets");
	eth_dev_add_port_queue_stats(d, stats.q_opackets, "q_opackets");
	eth_dev_add_port_queue_stats(d, stats.q_ibytes, "q_ibytes");
	eth_dev_add_port_queue_stats(d, stats.q_obytes, "q_obytes");
	eth_dev_add_port_queue_stats(d, stats.q_errors, "q_errors");

	return 0;
}

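/*
 * For reference: the dictionary built above reaches the client as JSON
 * along the lines of {"/ethdev/stats": {"ipackets": 100, ...,
 * "q_ipackets": [100, 0, ...]}}, with one array element per queue stat
 * counter.
 */
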
static int
eth_dev_handle_port_xstats(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	struct rte_eth_xstat *eth_xstats;
	struct rte_eth_xstat_name *xstat_names;
	int port_id, num_xstats;
	int i, ret;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	port_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		RTE_ETHDEV_LOG(NOTICE,
			"Extra parameters passed to ethdev telemetry command, ignoring\n");
	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	num_xstats = rte_eth_xstats_get(port_id, NULL, 0);
	if (num_xstats < 0)
		return -1;

	/* use one malloc for both names and stats */
	eth_xstats = malloc((sizeof(struct rte_eth_xstat) +
			sizeof(struct rte_eth_xstat_name)) * num_xstats);
	if (eth_xstats == NULL)
		return -1;
	xstat_names = (void *)&eth_xstats[num_xstats];
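	/*
	 * Layout of the single allocation, with n == num_xstats:
	 *
	 *   [ n * struct rte_eth_xstat ][ n * struct rte_eth_xstat_name ]
	 *   ^ eth_xstats                 ^ xstat_names
	 */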

	ret = rte_eth_xstats_get_names(port_id, xstat_names, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(eth_xstats);
		return -1;
	}

	ret = rte_eth_xstats_get(port_id, eth_xstats, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(eth_xstats);
		return -1;
	}

	rte_tel_data_start_dict(d);
	for (i = 0; i < num_xstats; i++)
		rte_tel_data_add_dict_u64(d, xstat_names[i].name,
				eth_xstats[i].value);
	free(eth_xstats);
	return 0;
}

static int
eth_dev_handle_port_link_status(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	static const char *status_str = "status";
	int ret, port_id;
	struct rte_eth_link link;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	port_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		RTE_ETHDEV_LOG(NOTICE,
			"Extra parameters passed to ethdev telemetry command, ignoring\n");
	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	ret = rte_eth_link_get_nowait(port_id, &link);
	if (ret < 0)
		return -1;

	rte_tel_data_start_dict(d);
	if (!link.link_status) {
		rte_tel_data_add_dict_string(d, status_str, "DOWN");
		return 0;
	}
	rte_tel_data_add_dict_string(d, status_str, "UP");
	rte_tel_data_add_dict_u64(d, "speed", link.link_speed);
	rte_tel_data_add_dict_string(d, "duplex",
			(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
				"full-duplex" : "half-duplex");
	return 0;
}

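/*
 * For reference: typical replies are {"/ethdev/link_status": {"status":
 * "DOWN"}} for a link that is down, or {"status": "UP", "speed": 10000,
 * "duplex": "full-duplex"} for a 10G full-duplex link.
 */
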
int
rte_eth_hairpin_queue_peer_update(uint16_t peer_port, uint16_t peer_queue,
				  struct rte_hairpin_peer_info *cur_info,
				  struct rte_hairpin_peer_info *peer_info,
				  uint32_t direction)
{
	struct rte_eth_dev *dev;

	/* Current queue information is optional, but peer info is mandatory. */
	if (peer_info == NULL)
		return -EINVAL;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[peer_port];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_update,
				-ENOTSUP);

	return (*dev->dev_ops->hairpin_queue_peer_update)(dev, peer_queue,
					cur_info, peer_info, direction);
}

int
rte_eth_hairpin_queue_peer_bind(uint16_t cur_port, uint16_t cur_queue,
				struct rte_hairpin_peer_info *peer_info,
				uint32_t direction)
{
	struct rte_eth_dev *dev;

	if (peer_info == NULL)
		return -EINVAL;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[cur_port];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_bind,
				-ENOTSUP);

	return (*dev->dev_ops->hairpin_queue_peer_bind)(dev, cur_queue,
							peer_info, direction);
}

int
rte_eth_hairpin_queue_peer_unbind(uint16_t cur_port, uint16_t cur_queue,
				  uint32_t direction)
{
	struct rte_eth_dev *dev;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[cur_port];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_unbind,
				-ENOTSUP);

	return (*dev->dev_ops->hairpin_queue_peer_unbind)(dev, cur_queue,
							  direction);
}

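/*
 * Illustrative sketch (not part of the library): one plausible order in
 * which a PMD could chain the three helpers above when wiring a local Tx
 * queue to a peer Rx queue. The direction value (1 == Tx-to-Rx here) and
 * the contents of rte_hairpin_peer_info are driver-defined assumptions;
 * a real caller allocates info inside the PMD.
 */
static __rte_unused int
example_hairpin_peer_flow(uint16_t tx_port, uint16_t rx_port, uint16_t queue,
			  struct rte_hairpin_peer_info *info)
{
	int ret;

	/* Ask the peer port to fill in its queue information. */
	ret = rte_eth_hairpin_queue_peer_update(rx_port, queue, NULL, info, 1);
	if (ret != 0)
		return ret;

	/* Bind the local queue to the peer using that information. */
	ret = rte_eth_hairpin_queue_peer_bind(tx_port, queue, info, 1);
	if (ret != 0)
		return ret;

	/* On teardown, unbind the local queue again. */
	return rte_eth_hairpin_queue_peer_unbind(tx_port, queue, 1);
}
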
int
rte_eth_representor_info_get(uint16_t port_id,
			     struct rte_eth_representor_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->representor_info_get, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev, info));
}

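/*
 * Illustrative sketch (not part of the library): the two-call pattern for
 * this API -- query the range count with a NULL info pointer first, then
 * allocate and fetch. rte_eth_representor_id_get() above relies on the
 * same pattern.
 */
static __rte_unused int
example_representor_info(uint16_t port_id)
{
	struct rte_eth_representor_info *info;
	int n;

	n = rte_eth_representor_info_get(port_id, NULL);
	if (n < 0)
		return n;

	info = calloc(1, sizeof(*info) + n * sizeof(info->ranges[0]));
	if (info == NULL)
		return -ENOMEM;

	n = rte_eth_representor_info_get(port_id, info);
	/* ... inspect info->ranges[0 .. n - 1] on success ... */
	free(info);
	return n < 0 ? n : 0;
}
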
RTE_LOG_REGISTER_DEFAULT(rte_eth_dev_logtype, INFO);

RTE_INIT(ethdev_init_telemetry)
{
	rte_telemetry_register_cmd("/ethdev/list", eth_dev_handle_port_list,
			"Returns list of available ethdev ports. Takes no parameters");
	rte_telemetry_register_cmd("/ethdev/stats", eth_dev_handle_port_stats,
			"Returns the common stats for a port. Parameters: int port_id");
	rte_telemetry_register_cmd("/ethdev/xstats", eth_dev_handle_port_xstats,
			"Returns the extended stats for a port. Parameters: int port_id");
	rte_telemetry_register_cmd("/ethdev/link_status",
			eth_dev_handle_port_link_status,
			"Returns the link status for a port. Parameters: int port_id");
}
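
/*
 * For reference: once registered, the commands can be exercised with the
 * interactive client shipped in usertools/dpdk-telemetry.py, e.g.:
 *
 *   --> /ethdev/stats,0
 *   {"/ethdev/stats": {"ipackets": 0, ...}}
 *
 * The integer after the comma is the port_id parameter parsed by the
 * handlers above.
 */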