/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <ctype.h>
#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_kvargs.h>
#include <rte_class.h>
#include <rte_ether.h>
#include <rte_telemetry.h>

#include "rte_ethdev_trace.h"
#include "rte_ethdev.h"
#include "ethdev_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* spinlock for eth device callbacks */
static rte_spinlock_t eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove rx callbacks */
static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove tx callbacks */
static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for shared data allocation */
static rte_spinlock_t eth_dev_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned int offset;
};

/* Shared memory between primary and secondary processes. */
static struct {
        uint64_t next_owner_id;
        rte_spinlock_t ownership_lock;
        struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
} *eth_dev_shared_data;

static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = {
        {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
        {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
        {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
        {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
        {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
        {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
        {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
        {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
                rx_nombuf)},
};

#define RTE_NB_STATS RTE_DIM(eth_dev_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_rxq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
        {"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS RTE_DIM(eth_dev_rxq_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_opackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)

#define RTE_RX_OFFLOAD_BIT2STR(_name)   \
        { DEV_RX_OFFLOAD_##_name, #_name }

#define RTE_ETH_RX_OFFLOAD_BIT2STR(_name)       \
        { RTE_ETH_RX_OFFLOAD_##_name, #_name }

static const struct {
        uint64_t offload;
        const char *name;
} eth_dev_rx_offload_names[] = {
        RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
        RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
        RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
        RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
        RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
        RTE_RX_OFFLOAD_BIT2STR(SCATTER),
        RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
        RTE_RX_OFFLOAD_BIT2STR(SECURITY),
        RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
        RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
        RTE_ETH_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
};

#undef RTE_RX_OFFLOAD_BIT2STR
#undef RTE_ETH_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name)   \
        { DEV_TX_OFFLOAD_##_name, #_name }

static const struct {
        uint64_t offload;
        const char *name;
} eth_dev_tx_offload_names[] = {
        RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
        RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
        RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
        RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
        RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
        RTE_TX_OFFLOAD_BIT2STR(SECURITY),
        RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
};

#undef RTE_TX_OFFLOAD_BIT2STR

/**
 * The user application callback description.
 *
 * It contains the callback address to be registered by the user application,
 * the pointer to the callback parameters, and the event type.
 */
struct rte_eth_dev_callback {
        TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
        rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
        void *cb_arg;                           /**< Parameter for callback */
        void *ret_param;                        /**< Return parameter */
        enum rte_eth_event_type event;          /**< Interrupt event type */
        uint32_t active;                        /**< Callback is executing */
};

enum {
        STAT_QMAP_TX = 0,
        STAT_QMAP_RX
};

int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
        int ret;
        struct rte_devargs devargs;
        const char *bus_param_key;
        char *bus_str = NULL;
        char *cls_str = NULL;
        int str_size;

        if (iter == NULL) {
                RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL iterator\n");
                return -EINVAL;
        }

        if (devargs_str == NULL) {
                RTE_ETHDEV_LOG(ERR,
                        "Cannot initialize iterator from NULL device description string\n");
                return -EINVAL;
        }

        memset(iter, 0, sizeof(*iter));
        memset(&devargs, 0, sizeof(devargs));

        /*
         * The devargs string may use various syntaxes:
         *   - 0000:08:00.0,representor=[1-3]
         *   - pci:0000:06:00.0,representor=[0,5]
         *   - class=eth,mac=00:11:22:33:44:55
         *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
         */

        /*
         * Handle a pure class filter (i.e. without any bus-level argument),
         * from the future new syntax.
         * rte_devargs_parse() does not yet support the new syntax,
         * which is why this simple case is temporarily parsed here.
         */
#define iter_anybus_str "class=eth,"
        if (strncmp(devargs_str, iter_anybus_str,
                        strlen(iter_anybus_str)) == 0) {
                iter->cls_str = devargs_str + strlen(iter_anybus_str);
                goto end;
        }

        /* Split bus, device and parameters. */
        ret = rte_devargs_parse(&devargs, devargs_str);
        if (ret != 0)
                goto error;

        /*
         * Assume parameters of old syntax can match only at ethdev level.
         * Extra parameters will be ignored, thanks to "+" prefix.
         */
        str_size = strlen(devargs.args) + 2;
        cls_str = malloc(str_size);
        if (cls_str == NULL) {
                ret = -ENOMEM;
                goto error;
        }
        ret = snprintf(cls_str, str_size, "+%s", devargs.args);
        if (ret != str_size - 1) {
                ret = -EINVAL;
                goto error;
        }
        iter->cls_str = cls_str;

        iter->bus = devargs.bus;
        if (iter->bus->dev_iterate == NULL) {
                ret = -ENOTSUP;
                goto error;
        }

        /* Convert bus args to new syntax for use with new API dev_iterate. */
        if ((strcmp(iter->bus->name, "vdev") == 0) ||
                (strcmp(iter->bus->name, "fslmc") == 0) ||
                (strcmp(iter->bus->name, "dpaa_bus") == 0)) {
                bus_param_key = "name";
        } else if (strcmp(iter->bus->name, "pci") == 0) {
                bus_param_key = "addr";
        } else {
                ret = -ENOTSUP;
                goto error;
        }
        str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
        bus_str = malloc(str_size);
        if (bus_str == NULL) {
                ret = -ENOMEM;
                goto error;
        }
        ret = snprintf(bus_str, str_size, "%s=%s",
                        bus_param_key, devargs.name);
        if (ret != str_size - 1) {
                ret = -EINVAL;
                goto error;
        }
        iter->bus_str = bus_str;

end:
        iter->cls = rte_class_find_by_name("eth");
        rte_devargs_reset(&devargs);
        return 0;

error:
        if (ret == -ENOTSUP)
                RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n",
                                iter->bus->name);
        rte_devargs_reset(&devargs);
        free(bus_str);
        free(cls_str);
        return ret;
}

uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
        if (iter == NULL) {
                RTE_ETHDEV_LOG(ERR,
                        "Cannot get next device from NULL iterator\n");
                return RTE_MAX_ETHPORTS;
        }

        if (iter->cls == NULL) /* invalid ethdev iterator */
                return RTE_MAX_ETHPORTS;

        do { /* loop to try all matching rte_device */
                /* If not pure ethdev filter and */
                if (iter->bus != NULL &&
                                /* not in middle of rte_eth_dev iteration, */
                                iter->class_device == NULL) {
                        /* get next rte_device to try. */
                        iter->device = iter->bus->dev_iterate(
                                        iter->device, iter->bus_str, iter);
                        if (iter->device == NULL)
                                break; /* no more rte_device candidate */
                }
                /* A device is matching bus part, need to check ethdev part. */
                iter->class_device = iter->cls->dev_iterate(
                                iter->class_device, iter->cls_str, iter);
                if (iter->class_device != NULL)
                        return eth_dev_to_id(iter->class_device); /* match */
        } while (iter->bus != NULL); /* need to try next rte_device */

        /* No more ethdev port to iterate. */
        rte_eth_iterator_cleanup(iter);
        return RTE_MAX_ETHPORTS;
}

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
        if (iter == NULL) {
                RTE_ETHDEV_LOG(ERR, "Cannot do clean up from NULL iterator\n");
                return;
        }

        if (iter->bus_str == NULL)
                return; /* nothing to free in pure class filter */
        free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
        free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
        memset(iter, 0, sizeof(*iter));
}
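
/*
 * Usage sketch (illustrative only, not part of this library file): the
 * typical application loop over the three iterator calls above. The guard
 * macro ETHDEV_USAGE_EXAMPLES is hypothetical and never defined, so this
 * block is compiled out and only documents the intended call sequence.
 */
#ifdef ETHDEV_USAGE_EXAMPLES
static void
example_iterate_devargs(const char *devargs_str)
{
        struct rte_dev_iterator iter;
        uint16_t port_id;

        if (rte_eth_iterator_init(&iter, devargs_str) != 0)
                return;
        for (port_id = rte_eth_iterator_next(&iter);
             port_id != RTE_MAX_ETHPORTS;
             port_id = rte_eth_iterator_next(&iter)) {
                /* use port_id here */
        }
        /* rte_eth_iterator_next() cleans up once exhausted; call
         * rte_eth_iterator_cleanup() only when breaking out early.
         */
}
#endif /* ETHDEV_USAGE_EXAMPLES */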

uint16_t
rte_eth_find_next(uint16_t port_id)
{
        while (port_id < RTE_MAX_ETHPORTS &&
                        rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
                port_id++;

        if (port_id >= RTE_MAX_ETHPORTS)
                return RTE_MAX_ETHPORTS;

        return port_id;
}

/*
 * Macro to iterate over all valid ports for internal usage.
 * Note: RTE_ETH_FOREACH_DEV is different because it filters out owned ports.
 */
#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
        for (port_id = rte_eth_find_next(0); \
             port_id < RTE_MAX_ETHPORTS; \
             port_id = rte_eth_find_next(port_id + 1))

uint16_t
rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
{
        port_id = rte_eth_find_next(port_id);
        while (port_id < RTE_MAX_ETHPORTS &&
                        rte_eth_devices[port_id].device != parent)
                port_id = rte_eth_find_next(port_id + 1);

        return port_id;
}

uint16_t
rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
        return rte_eth_find_next_of(port_id,
                        rte_eth_devices[ref_port_id].device);
}
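
/*
 * Usage sketch (illustrative only, compiled out behind the hypothetical
 * ETHDEV_USAGE_EXAMPLES guard): visiting every port that belongs to one
 * rte_device with rte_eth_find_next_of(), mirroring the
 * RTE_ETH_FOREACH_VALID_DEV pattern above.
 */
#ifdef ETHDEV_USAGE_EXAMPLES
static void
example_ports_of_device(const struct rte_device *parent)
{
        uint16_t port_id;

        for (port_id = rte_eth_find_next_of(0, parent);
             port_id < RTE_MAX_ETHPORTS;
             port_id = rte_eth_find_next_of(port_id + 1, parent)) {
                /* port_id belongs to parent here */
        }
}
#endif /* ETHDEV_USAGE_EXAMPLES */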

static void
eth_dev_shared_data_prepare(void)
{
        const unsigned int flags = 0;
        const struct rte_memzone *mz;

        rte_spinlock_lock(&eth_dev_shared_data_lock);

        if (eth_dev_shared_data == NULL) {
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        /* Allocate port data and ownership shared memory. */
                        mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
                                        sizeof(*eth_dev_shared_data),
                                        rte_socket_id(), flags);
                } else
                        mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
                if (mz == NULL)
                        rte_panic("Cannot allocate ethdev shared data\n");

                eth_dev_shared_data = mz->addr;
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        eth_dev_shared_data->next_owner_id =
                                        RTE_ETH_DEV_NO_OWNER + 1;
                        rte_spinlock_init(&eth_dev_shared_data->ownership_lock);
                        memset(eth_dev_shared_data->data, 0,
                               sizeof(eth_dev_shared_data->data));
                }
        }

        rte_spinlock_unlock(&eth_dev_shared_data_lock);
}

static bool
eth_dev_is_allocated(const struct rte_eth_dev *ethdev)
{
        return ethdev->data->name[0] != '\0';
}

static struct rte_eth_dev *
eth_dev_allocated(const char *name)
{
        uint16_t i;

        RTE_BUILD_BUG_ON(RTE_MAX_ETHPORTS >= UINT16_MAX);

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (rte_eth_devices[i].data != NULL &&
                    strcmp(rte_eth_devices[i].data->name, name) == 0)
                        return &rte_eth_devices[i];
        }
        return NULL;
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
        struct rte_eth_dev *ethdev;

        eth_dev_shared_data_prepare();

        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        ethdev = eth_dev_allocated(name);

        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

        return ethdev;
}

static uint16_t
eth_dev_find_free_port(void)
{
        uint16_t i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                /* Using shared name field to find a free port. */
                if (eth_dev_shared_data->data[i].name[0] == '\0') {
                        RTE_ASSERT(rte_eth_devices[i].state ==
                                   RTE_ETH_DEV_UNUSED);
                        return i;
                }
        }
        return RTE_MAX_ETHPORTS;
}

static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
{
        struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

        eth_dev->data = &eth_dev_shared_data->data[port_id];

        return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
        uint16_t port_id;
        struct rte_eth_dev *eth_dev = NULL;
        size_t name_len;

        name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
        if (name_len == 0) {
                RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n");
                return NULL;
        }

        if (name_len >= RTE_ETH_NAME_MAX_LEN) {
                RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n");
                return NULL;
        }

        eth_dev_shared_data_prepare();

        /* Synchronize port creation between primary and secondary threads. */
        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        if (eth_dev_allocated(name) != NULL) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethernet device with name %s already allocated\n",
                        name);
                goto unlock;
        }

        port_id = eth_dev_find_free_port();
        if (port_id == RTE_MAX_ETHPORTS) {
                RTE_ETHDEV_LOG(ERR,
                        "Reached maximum number of Ethernet ports\n");
                goto unlock;
        }

        eth_dev = eth_dev_get(port_id);
        strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
        eth_dev->data->port_id = port_id;
        eth_dev->data->mtu = RTE_ETHER_MTU;
        pthread_mutex_init(&eth_dev->data->flow_ops_mutex, NULL);

unlock:
        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

        return eth_dev;
}

/*
 * Attach to a port already registered by the primary process, which
 * ensures that the same device gets the same port ID in both the
 * primary and secondary processes.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
        uint16_t i;
        struct rte_eth_dev *eth_dev = NULL;

        eth_dev_shared_data_prepare();

        /* Synchronize port attachment to primary port creation and release. */
        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (strcmp(eth_dev_shared_data->data[i].name, name) == 0)
                        break;
        }
        if (i == RTE_MAX_ETHPORTS) {
                RTE_ETHDEV_LOG(ERR,
                        "Device %s is not driven by the primary process\n",
                        name);
        } else {
                eth_dev = eth_dev_get(i);
                RTE_ASSERT(eth_dev->data->port_id == i);
        }

        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
        return eth_dev;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
        if (eth_dev == NULL)
                return -EINVAL;

        eth_dev_shared_data_prepare();

        if (eth_dev->state != RTE_ETH_DEV_UNUSED)
                rte_eth_dev_callback_process(eth_dev,
                                RTE_ETH_EVENT_DESTROY, NULL);

        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        eth_dev->state = RTE_ETH_DEV_UNUSED;
        eth_dev->device = NULL;
        eth_dev->process_private = NULL;
        eth_dev->intr_handle = NULL;
        eth_dev->rx_pkt_burst = NULL;
        eth_dev->tx_pkt_burst = NULL;
        eth_dev->tx_pkt_prepare = NULL;
        eth_dev->rx_queue_count = NULL;
        eth_dev->rx_descriptor_done = NULL;
        eth_dev->rx_descriptor_status = NULL;
        eth_dev->tx_descriptor_status = NULL;
        eth_dev->dev_ops = NULL;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                rte_free(eth_dev->data->rx_queues);
                rte_free(eth_dev->data->tx_queues);
                rte_free(eth_dev->data->mac_addrs);
                rte_free(eth_dev->data->hash_mac_addrs);
                rte_free(eth_dev->data->dev_private);
                pthread_mutex_destroy(&eth_dev->data->flow_ops_mutex);
                memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
        }

        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

        return 0;
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
        if (port_id >= RTE_MAX_ETHPORTS ||
            (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
                return 0;
        else
                return 1;
}

static int
eth_is_valid_owner_id(uint64_t owner_id)
{
        if (owner_id == RTE_ETH_DEV_NO_OWNER ||
            eth_dev_shared_data->next_owner_id <= owner_id)
                return 0;
        return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
        port_id = rte_eth_find_next(port_id);
        while (port_id < RTE_MAX_ETHPORTS &&
                        rte_eth_devices[port_id].data->owner.id != owner_id)
                port_id = rte_eth_find_next(port_id + 1);

        return port_id;
}

int
rte_eth_dev_owner_new(uint64_t *owner_id)
{
        if (owner_id == NULL) {
                RTE_ETHDEV_LOG(ERR, "Cannot get new owner ID to NULL\n");
                return -EINVAL;
        }

        eth_dev_shared_data_prepare();

        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        *owner_id = eth_dev_shared_data->next_owner_id++;

        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
        return 0;
}

static int
eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
                       const struct rte_eth_dev_owner *new_owner)
{
        struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
        struct rte_eth_dev_owner *port_owner;

        if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
                RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
                        port_id);
                return -ENODEV;
        }

        if (new_owner == NULL) {
                RTE_ETHDEV_LOG(ERR,
                        "Cannot set ethdev port %u owner from NULL owner\n",
                        port_id);
                return -EINVAL;
        }

        if (!eth_is_valid_owner_id(new_owner->id) &&
            !eth_is_valid_owner_id(old_owner_id)) {
                RTE_ETHDEV_LOG(ERR,
                        "Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
                       old_owner_id, new_owner->id);
                return -EINVAL;
        }

        port_owner = &rte_eth_devices[port_id].data->owner;
        if (port_owner->id != old_owner_id) {
                RTE_ETHDEV_LOG(ERR,
                        "Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
                        port_id, port_owner->name, port_owner->id);
                return -EPERM;
        }

        /* can not truncate (same structure) */
        strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);

        port_owner->id = new_owner->id;

        RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
                port_id, new_owner->name, new_owner->id);

        return 0;
}

int
rte_eth_dev_owner_set(const uint16_t port_id,
                      const struct rte_eth_dev_owner *owner)
{
        int ret;

        eth_dev_shared_data_prepare();

        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
        return ret;
}

int
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
        const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
                        {.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
        int ret;

        eth_dev_shared_data_prepare();

        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        ret = eth_dev_owner_set(port_id, owner_id, &new_owner);

        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
        return ret;
}

int
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
        uint16_t port_id;
        int ret = 0;

        eth_dev_shared_data_prepare();

        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        if (eth_is_valid_owner_id(owner_id)) {
                for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
                        if (rte_eth_devices[port_id].data->owner.id == owner_id)
                                memset(&rte_eth_devices[port_id].data->owner, 0,
                                       sizeof(struct rte_eth_dev_owner));
                RTE_ETHDEV_LOG(NOTICE,
                        "All ports owned by owner id %016"PRIx64" have been released\n",
                        owner_id);
        } else {
                RTE_ETHDEV_LOG(ERR,
                               "Invalid owner id=%016"PRIx64"\n",
                               owner_id);
                ret = -EINVAL;
        }

        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

        return ret;
}

int
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
        struct rte_eth_dev *ethdev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        ethdev = &rte_eth_devices[port_id];

        if (!eth_dev_is_allocated(ethdev)) {
                RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
                        port_id);
                return -ENODEV;
        }

        if (owner == NULL) {
                RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u owner to NULL\n",
                        port_id);
                return -EINVAL;
        }

        eth_dev_shared_data_prepare();

        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
        rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

        return 0;
}
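
/*
 * Usage sketch (illustrative only, compiled out behind the hypothetical
 * ETHDEV_USAGE_EXAMPLES guard): the ownership life cycle built from the
 * public calls above -- take a fresh owner ID, claim a port, release it.
 */
#ifdef ETHDEV_USAGE_EXAMPLES
static int
example_own_port(uint16_t port_id)
{
        struct rte_eth_dev_owner owner = { .name = "example" };
        int ret;

        ret = rte_eth_dev_owner_new(&owner.id);
        if (ret != 0)
                return ret;
        ret = rte_eth_dev_owner_set(port_id, &owner);
        if (ret != 0)
                return ret;
        /* The port is now skipped by RTE_ETH_FOREACH_DEV in other code. */
        return rte_eth_dev_owner_unset(port_id, owner.id);
}
#endif /* ETHDEV_USAGE_EXAMPLES */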

int
rte_eth_dev_socket_id(uint16_t port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
        return rte_eth_devices[port_id].data->numa_node;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
        return rte_eth_devices[port_id].security_ctx;
}

uint16_t
rte_eth_dev_count_avail(void)
{
        uint16_t p;
        uint16_t count;

        count = 0;

        RTE_ETH_FOREACH_DEV(p)
                count++;

        return count;
}

uint16_t
rte_eth_dev_count_total(void)
{
        uint16_t port, count = 0;

        RTE_ETH_FOREACH_VALID_DEV(port)
                count++;

        return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
        char *tmp;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        if (name == NULL) {
                RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u name to NULL\n",
                        port_id);
                return -EINVAL;
        }

        /* We shouldn't check 'rte_eth_devices[i].data' here,
         * because it might be overwritten by a VDEV PMD */
        tmp = eth_dev_shared_data->data[port_id].name;
        strcpy(name, tmp);
        return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
        uint16_t pid;

        if (name == NULL) {
                RTE_ETHDEV_LOG(ERR, "Cannot get port ID from NULL name\n");
                return -EINVAL;
        }

        if (port_id == NULL) {
                RTE_ETHDEV_LOG(ERR,
                        "Cannot get port ID to NULL for %s\n", name);
                return -EINVAL;
        }

        RTE_ETH_FOREACH_VALID_DEV(pid)
                if (!strcmp(name, eth_dev_shared_data->data[pid].name)) {
                        *port_id = pid;
                        return 0;
                }

        return -ENODEV;
}
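
/*
 * Usage sketch (illustrative only, compiled out behind the hypothetical
 * ETHDEV_USAGE_EXAMPLES guard): round-tripping between a device name and
 * a port ID with the two lookup helpers above.
 */
#ifdef ETHDEV_USAGE_EXAMPLES
static int
example_lookup(const char *name)
{
        char buf[RTE_ETH_NAME_MAX_LEN];
        uint16_t port_id;
        int ret;

        ret = rte_eth_dev_get_port_by_name(name, &port_id);
        if (ret != 0)
                return ret; /* -ENODEV when no port has this name */
        return rte_eth_dev_get_name_by_port(port_id, buf);
}
#endif /* ETHDEV_USAGE_EXAMPLES */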

static int
eth_err(uint16_t port_id, int ret)
{
        if (ret == 0)
                return 0;
        if (rte_eth_dev_is_removed(port_id))
                return -EIO;
        return ret;
}

static int
eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_rx_queues;
        void **rxq;
        unsigned int i;

        if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
                                sizeof(dev->data->rx_queues[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (dev->data->rx_queues == NULL) {
                        dev->data->nb_rx_queues = 0;
                        return -(ENOMEM);
                }
        } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);
                rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (rxq == NULL)
                        return -(ENOMEM);
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(rxq + old_nb_queues, 0,
                                sizeof(rxq[0]) * new_qs);
                }

                dev->data->rx_queues = rxq;

        } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);

                rte_free(dev->data->rx_queues);
                dev->data->rx_queues = NULL;
        }
        dev->data->nb_rx_queues = nb_queues;
        return 0;
}

static int
eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        uint16_t port_id;

        if (rx_queue_id >= dev->data->nb_rx_queues) {
                port_id = dev->data->port_id;
                RTE_ETHDEV_LOG(ERR,
                               "Invalid Rx queue_id=%u of device with port_id=%u\n",
                               rx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->rx_queues[rx_queue_id] == NULL) {
                port_id = dev->data->port_id;
                RTE_ETHDEV_LOG(ERR,
                               "Queue %u of device with port_id=%u has not been setup\n",
                               rx_queue_id, port_id);
                return -EINVAL;
        }

        return 0;
}

static int
eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
        uint16_t port_id;

        if (tx_queue_id >= dev->data->nb_tx_queues) {
                port_id = dev->data->port_id;
                RTE_ETHDEV_LOG(ERR,
                               "Invalid Tx queue_id=%u of device with port_id=%u\n",
                               tx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->tx_queues[tx_queue_id] == NULL) {
                port_id = dev->data->port_id;
                RTE_ETHDEV_LOG(ERR,
                               "Queue %u of device with port_id=%u has not been setup\n",
                               tx_queue_id, port_id);
                return -EINVAL;
        }

        return 0;
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        if (!dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be started before starting any queue\n",
                        port_id);
                return -EINVAL;
        }

        ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
        if (ret != 0)
                return ret;

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

        if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        rx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->rx_queue_start(dev, rx_queue_id));
}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
        if (ret != 0)
                return ret;

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

        if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        rx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
}

int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        if (!dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be started before starting any queue\n",
                        port_id);
                return -EINVAL;
        }

        ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
        if (ret != 0)
                return ret;

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

        if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        tx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
        if (ret != 0)
                return ret;

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

        if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        tx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
}
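
/*
 * Usage sketch (illustrative only, compiled out behind the hypothetical
 * ETHDEV_USAGE_EXAMPLES guard): restarting a single Rx queue with the
 * per-queue start/stop calls above. The device itself must already be
 * started, as enforced by the checks in the start paths.
 */
#ifdef ETHDEV_USAGE_EXAMPLES
static int
example_restart_rx_queue(uint16_t port_id, uint16_t queue_id)
{
        int ret;

        ret = rte_eth_dev_rx_queue_stop(port_id, queue_id);
        if (ret != 0)
                return ret;
        /* e.g. drain or reconfigure application state here */
        return rte_eth_dev_rx_queue_start(port_id, queue_id);
}
#endif /* ETHDEV_USAGE_EXAMPLES */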

static int
eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_tx_queues;
        void **txq;
        unsigned int i;

        if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
                                                   sizeof(dev->data->tx_queues[0]) * nb_queues,
                                                   RTE_CACHE_LINE_SIZE);
                if (dev->data->tx_queues == NULL) {
                        dev->data->nb_tx_queues = 0;
                        return -(ENOMEM);
                }
        } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);
                txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
                                  RTE_CACHE_LINE_SIZE);
                if (txq == NULL)
                        return -ENOMEM;
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(txq + old_nb_queues, 0,
                               sizeof(txq[0]) * new_qs);
                }

                dev->data->tx_queues = txq;

        } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);

                rte_free(dev->data->tx_queues);
                dev->data->tx_queues = NULL;
        }
        dev->data->nb_tx_queues = nb_queues;
        return 0;
}

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
        switch (speed) {
        case ETH_SPEED_NUM_10M:
                return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
        case ETH_SPEED_NUM_100M:
                return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
        case ETH_SPEED_NUM_1G:
                return ETH_LINK_SPEED_1G;
        case ETH_SPEED_NUM_2_5G:
                return ETH_LINK_SPEED_2_5G;
        case ETH_SPEED_NUM_5G:
                return ETH_LINK_SPEED_5G;
        case ETH_SPEED_NUM_10G:
                return ETH_LINK_SPEED_10G;
        case ETH_SPEED_NUM_20G:
                return ETH_LINK_SPEED_20G;
        case ETH_SPEED_NUM_25G:
                return ETH_LINK_SPEED_25G;
        case ETH_SPEED_NUM_40G:
                return ETH_LINK_SPEED_40G;
        case ETH_SPEED_NUM_50G:
                return ETH_LINK_SPEED_50G;
        case ETH_SPEED_NUM_56G:
                return ETH_LINK_SPEED_56G;
        case ETH_SPEED_NUM_100G:
                return ETH_LINK_SPEED_100G;
        case ETH_SPEED_NUM_200G:
                return ETH_LINK_SPEED_200G;
        default:
                return 0;
        }
}
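
/*
 * Worked example (comment only): rte_eth_speed_bitflag(ETH_SPEED_NUM_100M, 0)
 * returns ETH_LINK_SPEED_100M_HD, the half-duplex flag, while any speed from
 * 1G upwards ignores the duplex argument because those links are full duplex
 * only. An unknown speed maps to 0, i.e. no bit set.
 */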

const char *
rte_eth_dev_rx_offload_name(uint64_t offload)
{
        const char *name = "UNKNOWN";
        unsigned int i;

        for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i) {
                if (offload == eth_dev_rx_offload_names[i].offload) {
                        name = eth_dev_rx_offload_names[i].name;
                        break;
                }
        }

        return name;
}

const char *
rte_eth_dev_tx_offload_name(uint64_t offload)
{
        const char *name = "UNKNOWN";
        unsigned int i;

        for (i = 0; i < RTE_DIM(eth_dev_tx_offload_names); ++i) {
                if (offload == eth_dev_tx_offload_names[i].offload) {
                        name = eth_dev_tx_offload_names[i].name;
                        break;
                }
        }

        return name;
}

static inline int
eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
                   uint32_t max_rx_pkt_len, uint32_t dev_info_size)
{
        int ret = 0;

        if (dev_info_size == 0) {
                if (config_size != max_rx_pkt_len) {
                        RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
                                       " %u != %u is not allowed\n",
                                       port_id, config_size, max_rx_pkt_len);
                        ret = -EINVAL;
                }
        } else if (config_size > dev_info_size) {
                RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
                               "> max allowed value %u\n", port_id, config_size,
                               dev_info_size);
                ret = -EINVAL;
        } else if (config_size < RTE_ETHER_MIN_LEN) {
                RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
                               "< min allowed value %u\n", port_id, config_size,
                               (unsigned int)RTE_ETHER_MIN_LEN);
                ret = -EINVAL;
        }
        return ret;
}

/*
 * Validate offloads that are requested through rte_eth_dev_configure against
 * the offloads successfully set by the ethernet device.
 *
 * @param port_id
 *   The port identifier of the Ethernet device.
 * @param req_offloads
 *   The offloads that have been requested through `rte_eth_dev_configure`.
 * @param set_offloads
 *   The offloads successfully set by the ethernet device.
 * @param offload_type
 *   The offload type i.e. Rx/Tx string.
 * @param offload_name
 *   The function that prints the offload name.
 * @return
 *   - (0) if validation successful.
 *   - (-EINVAL) if requested offload has been silently disabled.
 *
 */
static int
eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads,
                  uint64_t set_offloads, const char *offload_type,
                  const char *(*offload_name)(uint64_t))
{
        uint64_t offloads_diff = req_offloads ^ set_offloads;
        uint64_t offload;
        int ret = 0;

        while (offloads_diff != 0) {
                /* Check if any offload is requested but not enabled. */
                offload = 1ULL << __builtin_ctzll(offloads_diff);
                if (offload & req_offloads) {
                        RTE_ETHDEV_LOG(ERR,
                                "Port %u failed to enable %s offload %s\n",
                                port_id, offload_type, offload_name(offload));
                        ret = -EINVAL;
                }

                /* Check if offload couldn't be disabled. */
                if (offload & set_offloads) {
                        RTE_ETHDEV_LOG(DEBUG,
                                "Port %u %s offload %s is not requested but enabled\n",
                                port_id, offload_type, offload_name(offload));
                }

                offloads_diff &= ~offload;
        }

        return ret;
}
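
/*
 * Worked example (comment only) for the XOR walk above: with
 * req_offloads = 0x6 and set_offloads = 0x3, offloads_diff = 0x5.
 * The first pass isolates bit 0 (set but not requested -> debug log);
 * the second isolates bit 2 (requested but not set -> error, -EINVAL).
 */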

int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                      const struct rte_eth_conf *dev_conf)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        struct rte_eth_conf orig_conf;
        uint16_t overhead_len;
        int diag;
        int ret;
        uint16_t old_mtu;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        if (dev_conf == NULL) {
                RTE_ETHDEV_LOG(ERR,
                        "Cannot configure ethdev port %u from NULL config\n",
                        port_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

        if (dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be stopped to allow configuration\n",
                        port_id);
                return -EBUSY;
        }

        /*
         * Ensure that "dev_configured" is always 0 each time we prepare to
         * call dev_configure(), to avoid any unexpected behaviour.
         * It is set to 1 only when dev_configure() completes successfully.
         */
        dev->data->dev_configured = 0;

        /* Store original config, as rollback required on failure */
        memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));

        /*
         * Copy the dev_conf parameter into the dev structure.
         * rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get
         */
        if (dev_conf != &dev->data->dev_conf)
                memcpy(&dev->data->dev_conf, dev_conf,
                       sizeof(dev->data->dev_conf));

        /* Backup mtu for rollback */
        old_mtu = dev->data->mtu;

        ret = rte_eth_dev_info_get(port_id, &dev_info);
        if (ret != 0)
                goto rollback;

        /* Get the real Ethernet overhead length */
        if (dev_info.max_mtu != UINT16_MAX &&
            dev_info.max_rx_pktlen > dev_info.max_mtu)
                overhead_len = dev_info.max_rx_pktlen - dev_info.max_mtu;
        else
                overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

        /* If the number of queues specified by the application for both Rx
         * and Tx is zero, use driver preferred values. This cannot be done
         * individually as it is valid for either Tx or Rx (but not both) to
         * be zero. If the driver does not provide any preferred values, fall
         * back on EAL defaults.
         */
        if (nb_rx_q == 0 && nb_tx_q == 0) {
                nb_rx_q = dev_info.default_rxportconf.nb_queues;
                if (nb_rx_q == 0)
                        nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
                nb_tx_q = dev_info.default_txportconf.nb_queues;
                if (nb_tx_q == 0)
                        nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
        }

        if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
                RTE_ETHDEV_LOG(ERR,
                        "Number of RX queues requested (%u) is greater than max supported(%d)\n",
                        nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
                ret = -EINVAL;
                goto rollback;
        }

        if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
                RTE_ETHDEV_LOG(ERR,
                        "Number of TX queues requested (%u) is greater than max supported(%d)\n",
                        nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
                ret = -EINVAL;
                goto rollback;
        }

        /*
         * Check that the numbers of RX and TX queues are not greater
         * than the maximum number of RX and TX queues supported by the
         * configured device.
         */
        if (nb_rx_q > dev_info.max_rx_queues) {
                RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
                        port_id, nb_rx_q, dev_info.max_rx_queues);
                ret = -EINVAL;
                goto rollback;
        }

        if (nb_tx_q > dev_info.max_tx_queues) {
                RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
                        port_id, nb_tx_q, dev_info.max_tx_queues);
                ret = -EINVAL;
                goto rollback;
        }

        /* Check that the device supports requested interrupts */
        if ((dev_conf->intr_conf.lsc == 1) &&
                        (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
                RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
                        dev->device->driver->name);
                ret = -EINVAL;
                goto rollback;
        }
        if ((dev_conf->intr_conf.rmv == 1) &&
                        (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
                RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
                        dev->device->driver->name);
                ret = -EINVAL;
                goto rollback;
        }

        /*
         * If jumbo frames are enabled, check that the maximum RX packet
         * length is supported by the configured device.
         */
        if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
                if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) {
                        RTE_ETHDEV_LOG(ERR,
                                "Ethdev port_id=%u max_rx_pkt_len %u > max valid value %u\n",
                                port_id, dev_conf->rxmode.max_rx_pkt_len,
                                dev_info.max_rx_pktlen);
                        ret = -EINVAL;
                        goto rollback;
                } else if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN) {
                        RTE_ETHDEV_LOG(ERR,
                                "Ethdev port_id=%u max_rx_pkt_len %u < min valid value %u\n",
                                port_id, dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned int)RTE_ETHER_MIN_LEN);
                        ret = -EINVAL;
                        goto rollback;
                }

                /* Scale the MTU size to adapt max_rx_pkt_len */
                dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
                                overhead_len;
        } else {
                uint16_t pktlen = dev_conf->rxmode.max_rx_pkt_len;
                if (pktlen < RTE_ETHER_MIN_MTU + overhead_len ||
                    pktlen > RTE_ETHER_MTU + overhead_len)
                        /* Use default value */
                        dev->data->dev_conf.rxmode.max_rx_pkt_len =
                                                RTE_ETHER_MTU + overhead_len;
        }
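
        /*
         * Worked example (comment only): with the default overhead of
         * RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN = 14 + 4 = 18 bytes, a
         * max_rx_pkt_len of 1518 scales to an MTU of 1500 (RTE_ETHER_MTU).
         */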
1488
1489         /*
1490          * If LRO is enabled, check that the maximum aggregated packet
1491          * size is supported by the configured device.
1492          */
1493         if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
1494                 if (dev_conf->rxmode.max_lro_pkt_size == 0)
1495                         dev->data->dev_conf.rxmode.max_lro_pkt_size =
1496                                 dev->data->dev_conf.rxmode.max_rx_pkt_len;
1497                 ret = eth_dev_check_lro_pkt_size(port_id,
1498                                 dev->data->dev_conf.rxmode.max_lro_pkt_size,
1499                                 dev->data->dev_conf.rxmode.max_rx_pkt_len,
1500                                 dev_info.max_lro_pkt_size);
1501                 if (ret != 0)
1502                         goto rollback;
1503         }
1504
1505         /* Any requested offloading must be within its device capabilities */
1506         if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
1507              dev_conf->rxmode.offloads) {
1508                 RTE_ETHDEV_LOG(ERR,
1509                         "Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" don't match Rx offload "
1510                         "capabilities 0x%"PRIx64" in %s()\n",
1511                         port_id, dev_conf->rxmode.offloads,
1512                         dev_info.rx_offload_capa,
1513                         __func__);
1514                 ret = -EINVAL;
1515                 goto rollback;
1516         }
1517         if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
1518              dev_conf->txmode.offloads) {
1519                 RTE_ETHDEV_LOG(ERR,
1520                         "Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" don't match Tx offload "
1521                         "capabilities 0x%"PRIx64" in %s()\n",
1522                         port_id, dev_conf->txmode.offloads,
1523                         dev_info.tx_offload_capa,
1524                         __func__);
1525                 ret = -EINVAL;
1526                 goto rollback;
1527         }
1528
1529         dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
1530                 rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);
1531
1532         /* Check that device supports requested rss hash functions. */
1533         if ((dev_info.flow_type_rss_offloads |
1534              dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
1535             dev_info.flow_type_rss_offloads) {
1536                 RTE_ETHDEV_LOG(ERR,
1537                         "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
1538                         port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
1539                         dev_info.flow_type_rss_offloads);
1540                 ret = -EINVAL;
1541                 goto rollback;
1542         }
1543
1544         /* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
1545         if (((dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) == 0) &&
1546             (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
1547                 RTE_ETHDEV_LOG(ERR,
1548                         "Ethdev port_id=%u configured Rx mq_mode without RSS but %s offload is requested\n",
1549                         port_id,
1550                         rte_eth_dev_rx_offload_name(DEV_RX_OFFLOAD_RSS_HASH));
1551                 ret = -EINVAL;
1552                 goto rollback;
1553         }
1554
1555         /*
1556          * Setup new number of RX/TX queues and reconfigure device.
1557          */
1558         diag = eth_dev_rx_queue_config(dev, nb_rx_q);
1559         if (diag != 0) {
1560                 RTE_ETHDEV_LOG(ERR,
1561                         "Port%u eth_dev_rx_queue_config = %d\n",
1562                         port_id, diag);
1563                 ret = diag;
1564                 goto rollback;
1565         }
1566
1567         diag = eth_dev_tx_queue_config(dev, nb_tx_q);
1568         if (diag != 0) {
1569                 RTE_ETHDEV_LOG(ERR,
1570                         "Port%u eth_dev_tx_queue_config = %d\n",
1571                         port_id, diag);
1572                 eth_dev_rx_queue_config(dev, 0);
1573                 ret = diag;
1574                 goto rollback;
1575         }
1576
1577         diag = (*dev->dev_ops->dev_configure)(dev);
1578         if (diag != 0) {
1579                 RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
1580                         port_id, diag);
1581                 ret = eth_err(port_id, diag);
1582                 goto reset_queues;
1583         }
1584
1585         /* Initialize Rx profiling if enabled at compilation time. */
1586         diag = __rte_eth_dev_profile_init(port_id, dev);
1587         if (diag != 0) {
1588                 RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
1589                         port_id, diag);
1590                 ret = eth_err(port_id, diag);
1591                 goto reset_queues;
1592         }
1593
1594         /* Validate Rx offloads. */
1595         diag = eth_dev_validate_offloads(port_id,
1596                         dev_conf->rxmode.offloads,
1597                         dev->data->dev_conf.rxmode.offloads, "Rx",
1598                         rte_eth_dev_rx_offload_name);
1599         if (diag != 0) {
1600                 ret = diag;
1601                 goto reset_queues;
1602         }
1603
1604         /* Validate Tx offloads. */
1605         diag = eth_dev_validate_offloads(port_id,
1606                         dev_conf->txmode.offloads,
1607                         dev->data->dev_conf.txmode.offloads, "Tx",
1608                         rte_eth_dev_tx_offload_name);
1609         if (diag != 0) {
1610                 ret = diag;
1611                 goto reset_queues;
1612         }
1613
1614         dev->data->dev_configured = 1;
1615         rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0);
1616         return 0;
1617 reset_queues:
1618         eth_dev_rx_queue_config(dev, 0);
1619         eth_dev_tx_queue_config(dev, 0);
1620 rollback:
1621         memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
1622         if (old_mtu != dev->data->mtu)
1623                 dev->data->mtu = old_mtu;
1624
1625         rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret);
1626         return ret;
1627 }
1628
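/*
 * A minimal usage sketch (not part of the library): one call into
 * rte_eth_dev_configure() that satisfies the checks above. The helper
 * name app_init_port() and the single-queue layout are hypothetical.
 */
static int
app_init_port(uint16_t port_id)
{
        struct rte_eth_conf conf;
        int ret;

        memset(&conf, 0, sizeof(conf));
        conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
        conf.txmode.mq_mode = ETH_MQ_TX_NONE;

        /* One Rx and one Tx queue; the counts are validated against
         * dev_info.max_rx_queues/max_tx_queues above. */
        ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
        if (ret != 0)
                return ret; /* e.g. -EINVAL on an unsupported offload */

        return 0;
}
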
1629 void
1630 rte_eth_dev_internal_reset(struct rte_eth_dev *dev)
1631 {
1632         if (dev->data->dev_started) {
1633                 RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
1634                         dev->data->port_id);
1635                 return;
1636         }
1637
1638         eth_dev_rx_queue_config(dev, 0);
1639         eth_dev_tx_queue_config(dev, 0);
1640
1641         memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
1642 }
1643
1644 static void
1645 eth_dev_mac_restore(struct rte_eth_dev *dev,
1646                         struct rte_eth_dev_info *dev_info)
1647 {
1648         struct rte_ether_addr *addr;
1649         uint16_t i;
1650         uint32_t pool = 0;
1651         uint64_t pool_mask;
1652
1653         /* replay MAC address configuration including default MAC */
1654         addr = &dev->data->mac_addrs[0];
1655         if (*dev->dev_ops->mac_addr_set != NULL)
1656                 (*dev->dev_ops->mac_addr_set)(dev, addr);
1657         else if (*dev->dev_ops->mac_addr_add != NULL)
1658                 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);
1659
1660         if (*dev->dev_ops->mac_addr_add != NULL) {
1661                 for (i = 1; i < dev_info->max_mac_addrs; i++) {
1662                         addr = &dev->data->mac_addrs[i];
1663
1664                         /* skip zero address */
1665                         if (rte_is_zero_ether_addr(addr))
1666                                 continue;
1667
1668                         pool = 0;
1669                         pool_mask = dev->data->mac_pool_sel[i];
1670
1671                         do {
1672                                 if (pool_mask & 1ULL)
1673                                         (*dev->dev_ops->mac_addr_add)(dev,
1674                                                 addr, i, pool);
1675                                 pool_mask >>= 1;
1676                                 pool++;
1677                         } while (pool_mask);
1678                 }
1679         }
1680 }
1681
1682 static int
1683 eth_dev_config_restore(struct rte_eth_dev *dev,
1684                 struct rte_eth_dev_info *dev_info, uint16_t port_id)
1685 {
1686         int ret;
1687
1688         if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
1689                 eth_dev_mac_restore(dev, dev_info);
1690
1691         /* replay promiscuous configuration */
1692         /*
1693          * Use the driver callback directly: port_id is already valid
1694          * and the same-value early return of the public API is bypassed
1695          */
1696         if (rte_eth_promiscuous_get(port_id) == 1 &&
1697             *dev->dev_ops->promiscuous_enable != NULL) {
1698                 ret = eth_err(port_id,
1699                               (*dev->dev_ops->promiscuous_enable)(dev));
1700                 if (ret != 0 && ret != -ENOTSUP) {
1701                         RTE_ETHDEV_LOG(ERR,
1702                                 "Failed to enable promiscuous mode for device (port %u): %s\n",
1703                                 port_id, rte_strerror(-ret));
1704                         return ret;
1705                 }
1706         } else if (rte_eth_promiscuous_get(port_id) == 0 &&
1707                    *dev->dev_ops->promiscuous_disable != NULL) {
1708                 ret = eth_err(port_id,
1709                               (*dev->dev_ops->promiscuous_disable)(dev));
1710                 if (ret != 0 && ret != -ENOTSUP) {
1711                         RTE_ETHDEV_LOG(ERR,
1712                                 "Failed to disable promiscuous mode for device (port %u): %s\n",
1713                                 port_id, rte_strerror(-ret));
1714                         return ret;
1715                 }
1716         }
1717
1718         /* replay all multicast configuration */
1719         /*
1720          * Use the driver callback directly: port_id is already valid
1721          * and the same-value early return of the public API is bypassed
1722          */
1723         if (rte_eth_allmulticast_get(port_id) == 1 &&
1724             *dev->dev_ops->allmulticast_enable != NULL) {
1725                 ret = eth_err(port_id,
1726                               (*dev->dev_ops->allmulticast_enable)(dev));
1727                 if (ret != 0 && ret != -ENOTSUP) {
1728                         RTE_ETHDEV_LOG(ERR,
1729                                 "Failed to enable allmulticast mode for device (port %u): %s\n",
1730                                 port_id, rte_strerror(-ret));
1731                         return ret;
1732                 }
1733         } else if (rte_eth_allmulticast_get(port_id) == 0 &&
1734                    *dev->dev_ops->allmulticast_disable != NULL) {
1735                 ret = eth_err(port_id,
1736                               (*dev->dev_ops->allmulticast_disable)(dev));
1737                 if (ret != 0 && ret != -ENOTSUP) {
1738                         RTE_ETHDEV_LOG(ERR,
1739                                 "Failed to disable allmulticast mode for device (port %u): %s\n",
1740                                 port_id, rte_strerror(-ret));
1741                         return ret;
1742                 }
1743         }
1744
1745         return 0;
1746 }
1747
1748 int
1749 rte_eth_dev_start(uint16_t port_id)
1750 {
1751         struct rte_eth_dev *dev;
1752         struct rte_eth_dev_info dev_info;
1753         int diag;
1754         int ret, ret_stop;
1755
1756         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1757         dev = &rte_eth_devices[port_id];
1758
1759         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1760
1761         if (dev->data->dev_configured == 0) {
1762                 RTE_ETHDEV_LOG(INFO,
1763                         "Device with port_id=%"PRIu16" is not configured.\n",
1764                         port_id);
1765                 return -EINVAL;
1766         }
1767
1768         if (dev->data->dev_started != 0) {
1769                 RTE_ETHDEV_LOG(INFO,
1770                         "Device with port_id=%"PRIu16" already started\n",
1771                         port_id);
1772                 return 0;
1773         }
1774
1775         ret = rte_eth_dev_info_get(port_id, &dev_info);
1776         if (ret != 0)
1777                 return ret;
1778
1779         /* Let's restore the MAC address now if the device does not support live change */
1780         if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
1781                 eth_dev_mac_restore(dev, &dev_info);
1782
1783         diag = (*dev->dev_ops->dev_start)(dev);
1784         if (diag == 0)
1785                 dev->data->dev_started = 1;
1786         else
1787                 return eth_err(port_id, diag);
1788
1789         ret = eth_dev_config_restore(dev, &dev_info, port_id);
1790         if (ret != 0) {
1791                 RTE_ETHDEV_LOG(ERR,
1792                         "Error during restoring configuration for device (port %u): %s\n",
1793                         port_id, rte_strerror(-ret));
1794                 ret_stop = rte_eth_dev_stop(port_id);
1795                 if (ret_stop != 0) {
1796                         RTE_ETHDEV_LOG(ERR,
1797                                 "Failed to stop device (port %u): %s\n",
1798                                 port_id, rte_strerror(-ret_stop));
1799                 }
1800
1801                 return ret;
1802         }
1803
1804         if (dev->data->dev_conf.intr_conf.lsc == 0) {
1805                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
1806                 (*dev->dev_ops->link_update)(dev, 0);
1807         }
1808
1809         rte_ethdev_trace_start(port_id);
1810         return 0;
1811 }
1812
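/*
 * Usage sketch (hypothetical helper): settings applied before
 * rte_eth_dev_start() do not need re-applying afterwards, because
 * eth_dev_config_restore() above replays the MAC, promiscuous and
 * allmulticast state once dev_start() succeeds.
 */
static int
app_start_port(uint16_t port_id)
{
        int ret;

        ret = rte_eth_promiscuous_enable(port_id);
        if (ret != 0 && ret != -ENOTSUP)
                return ret;

        /* Replays the promiscuous setting made above on success */
        return rte_eth_dev_start(port_id);
}
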
1813 int
1814 rte_eth_dev_stop(uint16_t port_id)
1815 {
1816         struct rte_eth_dev *dev;
1817         int ret;
1818
1819         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1820         dev = &rte_eth_devices[port_id];
1821
1822         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_stop, -ENOTSUP);
1823
1824         if (dev->data->dev_started == 0) {
1825                 RTE_ETHDEV_LOG(INFO,
1826                         "Device with port_id=%"PRIu16" already stopped\n",
1827                         port_id);
1828                 return 0;
1829         }
1830
1831         dev->data->dev_started = 0;
1832         ret = (*dev->dev_ops->dev_stop)(dev);
1833         rte_ethdev_trace_stop(port_id, ret);
1834
1835         return ret;
1836 }
1837
1838 int
1839 rte_eth_dev_set_link_up(uint16_t port_id)
1840 {
1841         struct rte_eth_dev *dev;
1842
1843         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1844         dev = &rte_eth_devices[port_id];
1845
1846         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1847         return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
1848 }
1849
1850 int
1851 rte_eth_dev_set_link_down(uint16_t port_id)
1852 {
1853         struct rte_eth_dev *dev;
1854
1855         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1856         dev = &rte_eth_devices[port_id];
1857
1858         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1859         return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
1860 }
1861
1862 int
1863 rte_eth_dev_close(uint16_t port_id)
1864 {
1865         struct rte_eth_dev *dev;
1866         int firsterr, binerr;
1867         int *lasterr = &firsterr;
1868
1869         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1870         dev = &rte_eth_devices[port_id];
1871
1872         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
1873         *lasterr = (*dev->dev_ops->dev_close)(dev);
1874         if (*lasterr != 0)
1875                 lasterr = &binerr;
1876
1877         rte_ethdev_trace_close(port_id);
1878         *lasterr = rte_eth_dev_release_port(dev);
1879
1880         return firsterr;
1881 }
1882
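/*
 * The close path above keeps only the first failure: `lasterr` points at
 * `firsterr` until a step fails, then is redirected to the scratch slot
 * `binerr` so later return codes cannot overwrite the first error. A
 * generic sketch of the same pattern (step_one/step_two hypothetical):
 */
static int
example_first_error_wins(int (*step_one)(void), int (*step_two)(void))
{
        int firsterr, binerr;
        int *lasterr = &firsterr;

        *lasterr = step_one();
        if (*lasterr != 0)
                lasterr = &binerr; /* park further errors here */
        *lasterr = step_two();

        return firsterr; /* first failure, or step_two's result */
}
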
1883 int
1884 rte_eth_dev_reset(uint16_t port_id)
1885 {
1886         struct rte_eth_dev *dev;
1887         int ret;
1888
1889         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1890         dev = &rte_eth_devices[port_id];
1891
1892         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
1893
1894         ret = rte_eth_dev_stop(port_id);
1895         if (ret != 0) {
1896                 RTE_ETHDEV_LOG(ERR,
1897                         "Failed to stop device (port %u) before reset: %s; ignoring\n",
1898                         port_id, rte_strerror(-ret));
1899         }
1900         ret = dev->dev_ops->dev_reset(dev);
1901
1902         return eth_err(port_id, ret);
1903 }
1904
1905 int
1906 rte_eth_dev_is_removed(uint16_t port_id)
1907 {
1908         struct rte_eth_dev *dev;
1909         int ret;
1910
1911         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
1912         dev = &rte_eth_devices[port_id];
1913
1914         if (dev->state == RTE_ETH_DEV_REMOVED)
1915                 return 1;
1916
1917         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);
1918
1919         ret = dev->dev_ops->is_removed(dev);
1920         if (ret != 0)
1921                 /* Device is physically removed. */
1922                 dev->state = RTE_ETH_DEV_REMOVED;
1923
1924         return ret;
1925 }
1926
1927 static int
1928 rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg,
1929                              uint16_t n_seg, uint32_t *mbp_buf_size,
1930                              const struct rte_eth_dev_info *dev_info)
1931 {
1932         const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa;
1933         struct rte_mempool *mp_first;
1934         uint32_t offset_mask;
1935         uint16_t seg_idx;
1936
1937         if (n_seg > seg_capa->max_nseg) {
1938                 RTE_ETHDEV_LOG(ERR,
1939                                "Requested Rx segments %u exceed supported %u\n",
1940                                n_seg, seg_capa->max_nseg);
1941                 return -EINVAL;
1942         }
1943         /*
1944          * Check the sizes and offsets against buffer sizes
1945          * for each segment specified in extended configuration.
1946          */
1947         mp_first = rx_seg[0].mp;
1948         offset_mask = (1u << seg_capa->offset_align_log2) - 1;
1949         for (seg_idx = 0; seg_idx < n_seg; seg_idx++) {
1950                 struct rte_mempool *mpl = rx_seg[seg_idx].mp;
1951                 uint32_t length = rx_seg[seg_idx].length;
1952                 uint32_t offset = rx_seg[seg_idx].offset;
1953
1954                 if (mpl == NULL) {
1955                         RTE_ETHDEV_LOG(ERR, "null mempool pointer\n");
1956                         return -EINVAL;
1957                 }
1958                 if (seg_idx != 0 && mp_first != mpl &&
1959                     seg_capa->multi_pools == 0) {
1960                         RTE_ETHDEV_LOG(ERR, "Receiving to multiple pools is not supported\n");
1961                         return -ENOTSUP;
1962                 }
1963                 if (offset != 0) {
1964                         if (seg_capa->offset_allowed == 0) {
1965                                 RTE_ETHDEV_LOG(ERR, "Rx segmentation with offset is not supported\n");
1966                                 return -ENOTSUP;
1967                         }
1968                         if (offset & offset_mask) {
1969                                 RTE_ETHDEV_LOG(ERR, "Rx segmentation invalid offset alignment %u, %u\n",
1970                                                offset,
1971                                                seg_capa->offset_align_log2);
1972                                 return -EINVAL;
1973                         }
1974                 }
1975                 if (mpl->private_data_size <
1976                         sizeof(struct rte_pktmbuf_pool_private)) {
1977                         RTE_ETHDEV_LOG(ERR,
1978                                        "%s private_data_size %u < %u\n",
1979                                        mpl->name, mpl->private_data_size,
1980                                        (unsigned int)sizeof
1981                                         (struct rte_pktmbuf_pool_private));
1982                         return -ENOSPC;
1983                 }
1984                 offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM;
1985                 *mbp_buf_size = rte_pktmbuf_data_room_size(mpl);
1986                 length = length != 0 ? length : *mbp_buf_size;
1987                 if (*mbp_buf_size < length + offset) {
1988                         RTE_ETHDEV_LOG(ERR,
1989                                        "%s mbuf_data_room_size %u < %u (segment length=%u + segment offset=%u)\n",
1990                                        mpl->name, *mbp_buf_size,
1991                                        length + offset, length, offset);
1992                         return -EINVAL;
1993                 }
1994         }
1995         return 0;
1996 }
1997
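/*
 * A sketch of a two-segment split that passes the checks above: packet
 * headers land in one pool, payload in another. The pool arguments and
 * helper name are hypothetical; the device must advertise
 * RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT and, for two pools, the multi_pools
 * capability.
 */
static int
app_setup_split_rxq(uint16_t port_id, uint16_t qid,
                    struct rte_mempool *hdr_pool,
                    struct rte_mempool *pay_pool)
{
        struct rte_eth_dev_info info;
        struct rte_eth_rxconf rxconf;
        union rte_eth_rxseg segs[2];
        int ret;

        ret = rte_eth_dev_info_get(port_id, &info);
        if (ret != 0)
                return ret;

        memset(segs, 0, sizeof(segs));
        segs[0].split.mp = hdr_pool;
        segs[0].split.length = 128;     /* fixed-size header segment */
        segs[1].split.mp = pay_pool;
        segs[1].split.length = 0;       /* 0 = rest of the buffer */

        rxconf = info.default_rxconf;
        rxconf.offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
        rxconf.rx_seg = segs;
        rxconf.rx_nseg = RTE_DIM(segs);

        /* mp == NULL selects the extended segment path checked in
         * rte_eth_rx_queue_setup() below */
        return rte_eth_rx_queue_setup(port_id, qid, 512,
                        rte_eth_dev_socket_id(port_id), &rxconf, NULL);
}
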
1998 int
1999 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2000                        uint16_t nb_rx_desc, unsigned int socket_id,
2001                        const struct rte_eth_rxconf *rx_conf,
2002                        struct rte_mempool *mp)
2003 {
2004         int ret;
2005         uint32_t mbp_buf_size;
2006         struct rte_eth_dev *dev;
2007         struct rte_eth_dev_info dev_info;
2008         struct rte_eth_rxconf local_conf;
2009         void **rxq;
2010
2011         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2012         dev = &rte_eth_devices[port_id];
2013
2014         if (rx_queue_id >= dev->data->nb_rx_queues) {
2015                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
2016                 return -EINVAL;
2017         }
2018
2019         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
2020
2021         ret = rte_eth_dev_info_get(port_id, &dev_info);
2022         if (ret != 0)
2023                 return ret;
2024
2025         if (mp != NULL) {
2026                 /* Single pool configuration check. */
2027                 if (rx_conf != NULL && rx_conf->rx_nseg != 0) {
2028                         RTE_ETHDEV_LOG(ERR,
2029                                        "Ambiguous segment configuration\n");
2030                         return -EINVAL;
2031                 }
2032                 /*
2033                  * Check the size of the mbuf data buffer; this value
2034                  * must be provided in the private data of the memory pool.
2035                  * First check that the memory pool has valid private data.
2036                  */
2037                 if (mp->private_data_size <
2038                                 sizeof(struct rte_pktmbuf_pool_private)) {
2039                         RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n",
2040                                 mp->name, mp->private_data_size,
2041                                 (unsigned int)
2042                                 sizeof(struct rte_pktmbuf_pool_private));
2043                         return -ENOSPC;
2044                 }
2045                 mbp_buf_size = rte_pktmbuf_data_room_size(mp);
2046                 if (mbp_buf_size < dev_info.min_rx_bufsize +
2047                                    RTE_PKTMBUF_HEADROOM) {
2048                         RTE_ETHDEV_LOG(ERR,
2049                                        "%s mbuf_data_room_size %u < %u (RTE_PKTMBUF_HEADROOM=%u + min_rx_bufsize(dev)=%u)\n",
2050                                        mp->name, mbp_buf_size,
2051                                        RTE_PKTMBUF_HEADROOM +
2052                                        dev_info.min_rx_bufsize,
2053                                        RTE_PKTMBUF_HEADROOM,
2054                                        dev_info.min_rx_bufsize);
2055                         return -EINVAL;
2056                 }
2057         } else {
2058                 const struct rte_eth_rxseg_split *rx_seg;
2059                 uint16_t n_seg;
2060
2061                 /* Extended multi-segment configuration check. */
2062                 if (rx_conf == NULL || rx_conf->rx_seg == NULL || rx_conf->rx_nseg == 0) {
2063                         RTE_ETHDEV_LOG(ERR,
2064                                        "Memory pool is null and no extended configuration provided\n");
2065                         return -EINVAL;
2066                 }
2067
2068                 rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg;
2069                 n_seg = rx_conf->rx_nseg;
2070
2071                 if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
2072                         ret = rte_eth_rx_queue_check_split(rx_seg, n_seg,
2073                                                            &mbp_buf_size,
2074                                                            &dev_info);
2075                         if (ret != 0)
2076                                 return ret;
2077                 } else {
2078                         RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n");
2079                         return -EINVAL;
2080                 }
2081         }
2082
2083         /* Use default specified by driver, if nb_rx_desc is zero */
2084         if (nb_rx_desc == 0) {
2085                 nb_rx_desc = dev_info.default_rxportconf.ring_size;
2086                 /* If driver default is also zero, fall back on EAL default */
2087                 if (nb_rx_desc == 0)
2088                         nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
2089         }
2090
2091         if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
2092                         nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
2093                         nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
2094
2095                 RTE_ETHDEV_LOG(ERR,
2096                         "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
2097                         nb_rx_desc, dev_info.rx_desc_lim.nb_max,
2098                         dev_info.rx_desc_lim.nb_min,
2099                         dev_info.rx_desc_lim.nb_align);
2100                 return -EINVAL;
2101         }
2102
2103         if (dev->data->dev_started &&
2104                 !(dev_info.dev_capa &
2105                         RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
2106                 return -EBUSY;
2107
2108         if (dev->data->dev_started &&
2109                 (dev->data->rx_queue_state[rx_queue_id] !=
2110                         RTE_ETH_QUEUE_STATE_STOPPED))
2111                 return -EBUSY;
2112
2113         rxq = dev->data->rx_queues;
2114         if (rxq[rx_queue_id]) {
2115                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
2116                                         -ENOTSUP);
2117                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
2118                 rxq[rx_queue_id] = NULL;
2119         }
2120
2121         if (rx_conf == NULL)
2122                 rx_conf = &dev_info.default_rxconf;
2123
2124         local_conf = *rx_conf;
2125
2126         /*
2127          * If an offload has already been enabled in
2128          * rte_eth_dev_configure(), it has been enabled on all queues,
2129          * so there is no need to enable it on this queue again.
2130          * The local_conf.offloads input to the underlying PMD only
2131          * carries those offloads which are enabled on this queue
2132          * and not enabled on all queues.
2133          */
2134         local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
2135
2136         /*
2137          * Newly added offloads for this queue are those not enabled in
2138          * rte_eth_dev_configure() and they must be of a per-queue type.
2139          * A pure per-port offload can't be enabled on a queue while
2140          * disabled on another queue, and a pure per-port offload can't
2141          * be newly enabled for a queue if it hasn't been enabled in
2142          * rte_eth_dev_configure().
2143          */
2144         if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
2145              local_conf.offloads) {
2146                 RTE_ETHDEV_LOG(ERR,
2147                         "Ethdev port_id=%d rx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
2148                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2149                         port_id, rx_queue_id, local_conf.offloads,
2150                         dev_info.rx_queue_offload_capa,
2151                         __func__);
2152                 return -EINVAL;
2153         }
2154
2155         /*
2156          * If LRO is enabled, check that the maximum aggregated packet
2157          * size is supported by the configured device.
2158          */
2159         if (local_conf.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
2160                 if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0)
2161                         dev->data->dev_conf.rxmode.max_lro_pkt_size =
2162                                 dev->data->dev_conf.rxmode.max_rx_pkt_len;
2163                 int ret = eth_dev_check_lro_pkt_size(port_id,
2164                                 dev->data->dev_conf.rxmode.max_lro_pkt_size,
2165                                 dev->data->dev_conf.rxmode.max_rx_pkt_len,
2166                                 dev_info.max_lro_pkt_size);
2167                 if (ret != 0)
2168                         return ret;
2169         }
2170
2171         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
2172                                               socket_id, &local_conf, mp);
2173         if (!ret) {
2174                 if (!dev->data->min_rx_buf_size ||
2175                     dev->data->min_rx_buf_size > mbp_buf_size)
2176                         dev->data->min_rx_buf_size = mbp_buf_size;
2177         }
2178
2179         rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp,
2180                 rx_conf, ret);
2181         return eth_err(port_id, ret);
2182 }
2183
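/*
 * Usage sketch (hypothetical helper): the common single-pool path of
 * the setup routine above. Passing 0 descriptors and a NULL rxconf
 * selects the driver defaults validated above.
 */
static int
app_setup_rxq(uint16_t port_id, uint16_t qid, struct rte_mempool *mp)
{
        /* 0 -> default_rxportconf.ring_size, falling back to
         * RTE_ETH_DEV_FALLBACK_RX_RINGSIZE if the driver reports 0 */
        return rte_eth_rx_queue_setup(port_id, qid, 0,
                        rte_eth_dev_socket_id(port_id),
                        NULL /* use dev_info.default_rxconf */, mp);
}
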
2184 int
2185 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2186                                uint16_t nb_rx_desc,
2187                                const struct rte_eth_hairpin_conf *conf)
2188 {
2189         int ret;
2190         struct rte_eth_dev *dev;
2191         struct rte_eth_hairpin_cap cap;
2192         void **rxq;
2193         int i;
2194         int count;
2195
2196         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2197         dev = &rte_eth_devices[port_id];
2198
2199         if (rx_queue_id >= dev->data->nb_rx_queues) {
2200                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
2201                 return -EINVAL;
2202         }
2203
2204         if (conf == NULL) {
2205                 RTE_ETHDEV_LOG(ERR,
2206                         "Cannot setup ethdev port %u Rx hairpin queue from NULL config\n",
2207                         port_id);
2208                 return -EINVAL;
2209         }
2210
2211         ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2212         if (ret != 0)
2213                 return ret;
2214         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_hairpin_queue_setup,
2215                                 -ENOTSUP);
2216         /* if nb_rx_desc is zero use max number of desc from the driver. */
2217         if (nb_rx_desc == 0)
2218                 nb_rx_desc = cap.max_nb_desc;
2219         if (nb_rx_desc > cap.max_nb_desc) {
2220                 RTE_ETHDEV_LOG(ERR,
2221                         "Invalid value for nb_rx_desc(=%hu), should be: <= %hu\n",
2222                         nb_rx_desc, cap.max_nb_desc);
2223                 return -EINVAL;
2224         }
2225         if (conf->peer_count > cap.max_rx_2_tx) {
2226                 RTE_ETHDEV_LOG(ERR,
2227                         "Invalid value for number of peers for Rx queue(=%u), should be: <= %hu\n",
2228                         conf->peer_count, cap.max_rx_2_tx);
2229                 return -EINVAL;
2230         }
2231         if (conf->peer_count == 0) {
2232                 RTE_ETHDEV_LOG(ERR,
2233                         "Invalid value for number of peers for Rx queue(=%u), should be: > 0\n",
2234                         conf->peer_count);
2235                 return -EINVAL;
2236         }
2237         for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
2238              cap.max_nb_queues != UINT16_MAX; i++) {
2239                 if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
2240                         count++;
2241         }
2242         if (count > cap.max_nb_queues) {
2243                 RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues, max is %d\n",
2244                                cap.max_nb_queues);
2245                 return -EINVAL;
2246         }
2247         if (dev->data->dev_started)
2248                 return -EBUSY;
2249         rxq = dev->data->rx_queues;
2250         if (rxq[rx_queue_id] != NULL) {
2251                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
2252                                         -ENOTSUP);
2253                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
2254                 rxq[rx_queue_id] = NULL;
2255         }
2256         ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
2257                                                       nb_rx_desc, conf);
2258         if (ret == 0)
2259                 dev->data->rx_queue_state[rx_queue_id] =
2260                         RTE_ETH_QUEUE_STATE_HAIRPIN;
2261         return eth_err(port_id, ret);
2262 }
2263
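/*
 * A sketch of a one-peer Rx hairpin queue bound to a Tx queue on a
 * peer port, as validated above. Helper and parameter names are
 * hypothetical.
 */
static int
app_setup_hairpin_rxq(uint16_t rx_port, uint16_t qid,
                      uint16_t peer_tx_port, uint16_t peer_tx_queue)
{
        struct rte_eth_hairpin_conf hp;

        memset(&hp, 0, sizeof(hp));
        hp.peer_count = 1;              /* must be > 0 and <= max_rx_2_tx */
        hp.peers[0].port = peer_tx_port;
        hp.peers[0].queue = peer_tx_queue;

        /* nb_rx_desc == 0 selects cap.max_nb_desc from the driver */
        return rte_eth_rx_hairpin_queue_setup(rx_port, qid, 0, &hp);
}
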
2264 int
2265 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2266                        uint16_t nb_tx_desc, unsigned int socket_id,
2267                        const struct rte_eth_txconf *tx_conf)
2268 {
2269         struct rte_eth_dev *dev;
2270         struct rte_eth_dev_info dev_info;
2271         struct rte_eth_txconf local_conf;
2272         void **txq;
2273         int ret;
2274
2275         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2276         dev = &rte_eth_devices[port_id];
2277
2278         if (tx_queue_id >= dev->data->nb_tx_queues) {
2279                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
2280                 return -EINVAL;
2281         }
2282
2283         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
2284
2285         ret = rte_eth_dev_info_get(port_id, &dev_info);
2286         if (ret != 0)
2287                 return ret;
2288
2289         /* Use default specified by driver, if nb_tx_desc is zero */
2290         if (nb_tx_desc == 0) {
2291                 nb_tx_desc = dev_info.default_txportconf.ring_size;
2292                 /* If driver default is zero, fall back on EAL default */
2293                 if (nb_tx_desc == 0)
2294                         nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
2295         }
2296         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
2297             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
2298             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
2299                 RTE_ETHDEV_LOG(ERR,
2300                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
2301                         nb_tx_desc, dev_info.tx_desc_lim.nb_max,
2302                         dev_info.tx_desc_lim.nb_min,
2303                         dev_info.tx_desc_lim.nb_align);
2304                 return -EINVAL;
2305         }
2306
2307         if (dev->data->dev_started &&
2308                 !(dev_info.dev_capa &
2309                         RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
2310                 return -EBUSY;
2311
2312         if (dev->data->dev_started &&
2313                 (dev->data->tx_queue_state[tx_queue_id] !=
2314                         RTE_ETH_QUEUE_STATE_STOPPED))
2315                 return -EBUSY;
2316
2317         txq = dev->data->tx_queues;
2318         if (txq[tx_queue_id]) {
2319                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
2320                                         -ENOTSUP);
2321                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
2322                 txq[tx_queue_id] = NULL;
2323         }
2324
2325         if (tx_conf == NULL)
2326                 tx_conf = &dev_info.default_txconf;
2327
2328         local_conf = *tx_conf;
2329
2330         /*
2331          * If an offload has already been enabled in
2332          * rte_eth_dev_configure(), it has been enabled on all queues,
2333          * so there is no need to enable it on this queue again.
2334          * The local_conf.offloads input to the underlying PMD only
2335          * carries those offloads which are enabled on this queue
2336          * and not enabled on all queues.
2337          */
2338         local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
2339
2340         /*
2341          * Newly added offloads for this queue are those not enabled in
2342          * rte_eth_dev_configure() and they must be of a per-queue type.
2343          * A pure per-port offload can't be enabled on a queue while
2344          * disabled on another queue, and a pure per-port offload can't
2345          * be newly enabled for a queue if it hasn't been enabled in
2346          * rte_eth_dev_configure().
2347          */
2348         if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
2349              local_conf.offloads) {
2350                 RTE_ETHDEV_LOG(ERR,
2351                         "Ethdev port_id=%d tx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
2352                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2353                         port_id, tx_queue_id, local_conf.offloads,
2354                         dev_info.tx_queue_offload_capa,
2355                         __func__);
2356                 return -EINVAL;
2357         }
2358
2359         rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf);
2360         return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
2361                        tx_queue_id, nb_tx_desc, socket_id, &local_conf));
2362 }
2363
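/*
 * A sketch of requesting one extra per-queue Tx offload on top of the
 * driver defaults. The offload chosen here is only an example; whatever
 * is added must appear in dev_info.tx_queue_offload_capa or the setup
 * routine above rejects it with -EINVAL. Helper name is hypothetical.
 */
static int
app_setup_txq(uint16_t port_id, uint16_t qid)
{
        struct rte_eth_dev_info info;
        struct rte_eth_txconf txconf;
        int ret;

        ret = rte_eth_dev_info_get(port_id, &info);
        if (ret != 0)
                return ret;

        txconf = info.default_txconf;
        txconf.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;

        /* 0 descriptors -> driver default ring size */
        return rte_eth_tx_queue_setup(port_id, qid, 0,
                        rte_eth_dev_socket_id(port_id), &txconf);
}
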
2364 int
2365 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2366                                uint16_t nb_tx_desc,
2367                                const struct rte_eth_hairpin_conf *conf)
2368 {
2369         struct rte_eth_dev *dev;
2370         struct rte_eth_hairpin_cap cap;
2371         void **txq;
2372         int i;
2373         int count;
2374         int ret;
2375
2376         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2377         dev = &rte_eth_devices[port_id];
2378
2379         if (tx_queue_id >= dev->data->nb_tx_queues) {
2380                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
2381                 return -EINVAL;
2382         }
2383
2384         if (conf == NULL) {
2385                 RTE_ETHDEV_LOG(ERR,
2386                         "Cannot setup ethdev port %u Tx hairpin queue from NULL config\n",
2387                         port_id);
2388                 return -EINVAL;
2389         }
2390
2391         ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2392         if (ret != 0)
2393                 return ret;
2394         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup,
2395                                 -ENOTSUP);
2396         /* if nb_tx_desc is zero use max number of desc from the driver. */
2397         if (nb_tx_desc == 0)
2398                 nb_tx_desc = cap.max_nb_desc;
2399         if (nb_tx_desc > cap.max_nb_desc) {
2400                 RTE_ETHDEV_LOG(ERR,
2401                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu\n",
2402                         nb_tx_desc, cap.max_nb_desc);
2403                 return -EINVAL;
2404         }
2405         if (conf->peer_count > cap.max_tx_2_rx) {
2406                 RTE_ETHDEV_LOG(ERR,
2407                         "Invalid value for number of peers for Tx queue(=%u), should be: <= %hu\n",
2408                         conf->peer_count, cap.max_tx_2_rx);
2409                 return -EINVAL;
2410         }
2411         if (conf->peer_count == 0) {
2412                 RTE_ETHDEV_LOG(ERR,
2413                         "Invalid value for number of peers for Tx queue(=%u), should be: > 0\n",
2414                         conf->peer_count);
2415                 return -EINVAL;
2416         }
2417         for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
2418              cap.max_nb_queues != UINT16_MAX; i++) {
2419                 if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
2420                         count++;
2421         }
2422         if (count > cap.max_nb_queues) {
2423                 RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues, max is %d\n",
2424                                cap.max_nb_queues);
2425                 return -EINVAL;
2426         }
2427         if (dev->data->dev_started)
2428                 return -EBUSY;
2429         txq = dev->data->tx_queues;
2430         if (txq[tx_queue_id] != NULL) {
2431                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
2432                                         -ENOTSUP);
2433                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
2434                 txq[tx_queue_id] = NULL;
2435         }
2436         ret = (*dev->dev_ops->tx_hairpin_queue_setup)
2437                 (dev, tx_queue_id, nb_tx_desc, conf);
2438         if (ret == 0)
2439                 dev->data->tx_queue_state[tx_queue_id] =
2440                         RTE_ETH_QUEUE_STATE_HAIRPIN;
2441         return eth_err(port_id, ret);
2442 }
2443
2444 int
2445 rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
2446 {
2447         struct rte_eth_dev *dev;
2448         int ret;
2449
2450         RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2451         dev = &rte_eth_devices[tx_port];
2452
2453         if (dev->data->dev_started == 0) {
2454                 RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port);
2455                 return -EBUSY;
2456         }
2457
2458         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_bind, -ENOTSUP);
2459         ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port);
2460         if (ret != 0)
2461                 RTE_ETHDEV_LOG(ERR, "Failed to bind hairpin Tx %d"
2462                                " to Rx %d (%d means all ports)\n",
2463                                tx_port, rx_port, RTE_MAX_ETHPORTS);
2464
2465         return ret;
2466 }
2467
2468 int
2469 rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
2470 {
2471         struct rte_eth_dev *dev;
2472         int ret;
2473
2474         RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2475         dev = &rte_eth_devices[tx_port];
2476
2477         if (dev->data->dev_started == 0) {
2478                 RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port);
2479                 return -EBUSY;
2480         }
2481
2482         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_unbind, -ENOTSUP);
2483         ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port);
2484         if (ret != 0)
2485                 RTE_ETHDEV_LOG(ERR, "Failed to unbind hairpin Tx %d"
2486                                " from Rx %d (%d means all ports)\n",
2487                                tx_port, rx_port, RTE_MAX_ETHPORTS);
2488
2489         return ret;
2490 }
2491
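/*
 * A sketch of manual hairpin binding: with manual_bind set in the queue
 * configuration, both ports are started first and then bound
 * explicitly. Helper and port names are hypothetical.
 */
static int
app_bind_hairpin_ports(uint16_t tx_port, uint16_t rx_port)
{
        int ret;

        ret = rte_eth_dev_start(tx_port);
        if (ret != 0)
                return ret;
        ret = rte_eth_dev_start(rx_port);
        if (ret != 0)
                return ret;

        /* Passing RTE_MAX_ETHPORTS as rx_port would bind all peers */
        return rte_eth_hairpin_bind(tx_port, rx_port);
}
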
2492 int
2493 rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
2494                                size_t len, uint32_t direction)
2495 {
2496         struct rte_eth_dev *dev;
2497         int ret;
2498
2499         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2500         dev = &rte_eth_devices[port_id];
2501
2502         if (peer_ports == NULL) {
2503                 RTE_ETHDEV_LOG(ERR,
2504                         "Cannot get ethdev port %u hairpin peer ports to NULL\n",
2505                         port_id);
2506                 return -EINVAL;
2507         }
2508
2509         if (len == 0) {
2510                 RTE_ETHDEV_LOG(ERR,
2511                         "Cannot get ethdev port %u hairpin peer ports to array with zero size\n",
2512                         port_id);
2513                 return -EINVAL;
2514         }
2515
2516         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_get_peer_ports,
2517                                 -ENOTSUP);
2518
2519         ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports,
2520                                                       len, direction);
2521         if (ret < 0)
2522                 RTE_ETHDEV_LOG(ERR, "Failed to get port %d hairpin peer %s ports\n",
2523                                port_id, direction ? "Rx" : "Tx");
2524
2525         return ret;
2526 }
2527
2528 void
2529 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
2530                 void *userdata __rte_unused)
2531 {
2532         rte_pktmbuf_free_bulk(pkts, unsent);
2533 }
2534
2535 void
2536 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
2537                 void *userdata)
2538 {
2539         uint64_t *count = userdata;
2540
2541         rte_pktmbuf_free_bulk(pkts, unsent);
2542         *count += unsent;
2543 }
2544
2545 int
2546 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
2547                 buffer_tx_error_fn cbfn, void *userdata)
2548 {
2549         if (buffer == NULL) {
2550                 RTE_ETHDEV_LOG(ERR,
2551                         "Cannot set Tx buffer error callback to NULL buffer\n");
2552                 return -EINVAL;
2553         }
2554
2555         buffer->error_callback = cbfn;
2556         buffer->error_userdata = userdata;
2557         return 0;
2558 }
2559
2560 int
2561 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
2562 {
2563         int ret = 0;
2564
2565         if (buffer == NULL) {
2566                 RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL buffer\n");
2567                 return -EINVAL;
2568         }
2569
2570         buffer->size = size;
2571         if (buffer->error_callback == NULL) {
2572                 ret = rte_eth_tx_buffer_set_err_callback(
2573                         buffer, rte_eth_tx_buffer_drop_callback, NULL);
2574         }
2575
2576         return ret;
2577 }
2578
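/*
 * A sketch of allocating a Tx buffer and replacing the default drop
 * callback with the counting callback above. The helper name and the
 * buffer size of 32 packets are hypothetical.
 */
static struct rte_eth_dev_tx_buffer *
app_make_tx_buffer(uint64_t *drop_counter)
{
        struct rte_eth_dev_tx_buffer *buffer;

        buffer = rte_zmalloc("app_tx_buffer",
                        RTE_ETH_TX_BUFFER_SIZE(32), 0);
        if (buffer == NULL)
                return NULL;

        /* init installs rte_eth_tx_buffer_drop_callback by default */
        if (rte_eth_tx_buffer_init(buffer, 32) != 0 ||
            rte_eth_tx_buffer_set_err_callback(buffer,
                        rte_eth_tx_buffer_count_callback,
                        drop_counter) != 0) {
                rte_free(buffer);
                return NULL;
        }

        return buffer;
}
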
2579 int
2580 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
2581 {
2582         struct rte_eth_dev *dev;
2583         int ret;
2584
2585         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2586         dev = &rte_eth_devices[port_id];
2587
2588         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
2589
2590         /* Call driver to free pending mbufs. */
2591         ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
2592                                                free_cnt);
2593         return eth_err(port_id, ret);
2594 }
2595
2596 int
2597 rte_eth_promiscuous_enable(uint16_t port_id)
2598 {
2599         struct rte_eth_dev *dev;
2600         int diag = 0;
2601
2602         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2603         dev = &rte_eth_devices[port_id];
2604
2605         if (dev->data->promiscuous == 1)
2606                 return 0;
2607
2608         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP);
2609
2610         diag = (*dev->dev_ops->promiscuous_enable)(dev);
2611         dev->data->promiscuous = (diag == 0) ? 1 : 0;
2612
2613         return eth_err(port_id, diag);
2614 }
2615
2616 int
2617 rte_eth_promiscuous_disable(uint16_t port_id)
2618 {
2619         struct rte_eth_dev *dev;
2620         int diag = 0;
2621
2622         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2623         dev = &rte_eth_devices[port_id];
2624
2625         if (dev->data->promiscuous == 0)
2626                 return 0;
2627
2628         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP);
2629
2630         dev->data->promiscuous = 0;
2631         diag = (*dev->dev_ops->promiscuous_disable)(dev);
2632         if (diag != 0)
2633                 dev->data->promiscuous = 1;
2634
2635         return eth_err(port_id, diag);
2636 }
2637
2638 int
2639 rte_eth_promiscuous_get(uint16_t port_id)
2640 {
2641         struct rte_eth_dev *dev;
2642
2643         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2644         dev = &rte_eth_devices[port_id];
2645
2646         return dev->data->promiscuous;
2647 }
2648
2649 int
2650 rte_eth_allmulticast_enable(uint16_t port_id)
2651 {
2652         struct rte_eth_dev *dev;
2653         int diag;
2654
2655         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2656         dev = &rte_eth_devices[port_id];
2657
2658         if (dev->data->all_multicast == 1)
2659                 return 0;
2660
2661         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP);
2662         diag = (*dev->dev_ops->allmulticast_enable)(dev);
2663         dev->data->all_multicast = (diag == 0) ? 1 : 0;
2664
2665         return eth_err(port_id, diag);
2666 }
2667
2668 int
2669 rte_eth_allmulticast_disable(uint16_t port_id)
2670 {
2671         struct rte_eth_dev *dev;
2672         int diag;
2673
2674         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2675         dev = &rte_eth_devices[port_id];
2676
2677         if (dev->data->all_multicast == 0)
2678                 return 0;
2679
2680         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP);
2681         dev->data->all_multicast = 0;
2682         diag = (*dev->dev_ops->allmulticast_disable)(dev);
2683         if (diag != 0)
2684                 dev->data->all_multicast = 1;
2685
2686         return eth_err(port_id, diag);
2687 }
2688
2689 int
2690 rte_eth_allmulticast_get(uint16_t port_id)
2691 {
2692         struct rte_eth_dev *dev;
2693
2694         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2695         dev = &rte_eth_devices[port_id];
2696
2697         return dev->data->all_multicast;
2698 }
2699
2700 int
2701 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
2702 {
2703         struct rte_eth_dev *dev;
2704
2705         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2706         dev = &rte_eth_devices[port_id];
2707
2708         if (eth_link == NULL) {
2709                 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n",
2710                         port_id);
2711                 return -EINVAL;
2712         }
2713
2714         if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started)
2715                 rte_eth_linkstatus_get(dev, eth_link);
2716         else {
2717                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2718                 (*dev->dev_ops->link_update)(dev, 1);
2719                 *eth_link = dev->data->dev_link;
2720         }
2721
2722         return 0;
2723 }
2724
2725 int
2726 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
2727 {
2728         struct rte_eth_dev *dev;
2729
2730         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2731         dev = &rte_eth_devices[port_id];
2732
2733         if (eth_link == NULL) {
2734                 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n",
2735                         port_id);
2736                 return -EINVAL;
2737         }
2738
2739         if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started)
2740                 rte_eth_linkstatus_get(dev, eth_link);
2741         else {
2742                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2743                 (*dev->dev_ops->link_update)(dev, 0);
2744                 *eth_link = dev->data->dev_link;
2745         }
2746
2747         return 0;
2748 }
2749
2750 const char *
2751 rte_eth_link_speed_to_str(uint32_t link_speed)
2752 {
2753         switch (link_speed) {
2754         case ETH_SPEED_NUM_NONE: return "None";
2755         case ETH_SPEED_NUM_10M:  return "10 Mbps";
2756         case ETH_SPEED_NUM_100M: return "100 Mbps";
2757         case ETH_SPEED_NUM_1G:   return "1 Gbps";
2758         case ETH_SPEED_NUM_2_5G: return "2.5 Gbps";
2759         case ETH_SPEED_NUM_5G:   return "5 Gbps";
2760         case ETH_SPEED_NUM_10G:  return "10 Gbps";
2761         case ETH_SPEED_NUM_20G:  return "20 Gbps";
2762         case ETH_SPEED_NUM_25G:  return "25 Gbps";
2763         case ETH_SPEED_NUM_40G:  return "40 Gbps";
2764         case ETH_SPEED_NUM_50G:  return "50 Gbps";
2765         case ETH_SPEED_NUM_56G:  return "56 Gbps";
2766         case ETH_SPEED_NUM_100G: return "100 Gbps";
2767         case ETH_SPEED_NUM_200G: return "200 Gbps";
2768         case ETH_SPEED_NUM_UNKNOWN: return "Unknown";
2769         default: return "Invalid";
2770         }
2771 }
2772
2773 int
2774 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
2775 {
2776         if (str == NULL) {
2777                 RTE_ETHDEV_LOG(ERR, "Cannot convert link to NULL string\n");
2778                 return -EINVAL;
2779         }
2780
2781         if (len == 0) {
2782                 RTE_ETHDEV_LOG(ERR,
2783                         "Cannot convert link to string with zero size\n");
2784                 return -EINVAL;
2785         }
2786
2787         if (eth_link == NULL) {
2788                 RTE_ETHDEV_LOG(ERR, "Cannot convert to string from NULL link\n");
2789                 return -EINVAL;
2790         }
2791
2792         if (eth_link->link_status == ETH_LINK_DOWN)
2793                 return snprintf(str, len, "Link down");
2794         else
2795                 return snprintf(str, len, "Link up at %s %s %s",
2796                         rte_eth_link_speed_to_str(eth_link->link_speed),
2797                         (eth_link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
2798                         "FDX" : "HDX",
2799                         (eth_link->link_autoneg == ETH_LINK_AUTONEG) ?
2800                         "Autoneg" : "Fixed");
2801 }
2802
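/*
 * Usage sketch: formatting the current link state with the two helpers
 * above. The helper name is hypothetical; callers typically size the
 * text buffer as RTE_ETH_LINK_MAX_STR_LEN.
 */
static int
app_link_text(uint16_t port_id, char *text, size_t len)
{
        struct rte_eth_link link;
        int ret;

        ret = rte_eth_link_get_nowait(port_id, &link);
        if (ret != 0)
                return ret;

        /* e.g. "Link up at 25 Gbps FDX Autoneg" or "Link down" */
        ret = rte_eth_link_to_str(text, len, &link);
        return ret < 0 ? ret : 0;
}
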
2803 int
2804 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
2805 {
2806         struct rte_eth_dev *dev;
2807
2808         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2809         dev = &rte_eth_devices[port_id];
2810
2811         if (stats == NULL) {
2812                 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u stats to NULL\n",
2813                         port_id);
2814                 return -EINVAL;
2815         }
2816
2817         memset(stats, 0, sizeof(*stats));
2818
2819         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
2820         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
2821         return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
2822 }
2823
2824 int
2825 rte_eth_stats_reset(uint16_t port_id)
2826 {
2827         struct rte_eth_dev *dev;
2828         int ret;
2829
2830         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2831         dev = &rte_eth_devices[port_id];
2832
2833         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
2834         ret = (*dev->dev_ops->stats_reset)(dev);
2835         if (ret != 0)
2836                 return eth_err(port_id, ret);
2837
2838         dev->data->rx_mbuf_alloc_failed = 0;
2839
2840         return 0;
2841 }
2842
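/*
 * Usage sketch: sampling the two drop counters most often watched in
 * production, then clearing the statistics. Helper name is
 * hypothetical.
 */
static int
app_sample_drops(uint16_t port_id, uint64_t *missed, uint64_t *nombuf)
{
        struct rte_eth_stats st;
        int ret;

        ret = rte_eth_stats_get(port_id, &st);
        if (ret != 0)
                return ret;

        *missed = st.imissed;   /* packets dropped by the HW Rx path */
        *nombuf = st.rx_nombuf; /* Rx mbuf allocation failures (SW) */

        return rte_eth_stats_reset(port_id);
}
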
2843 static inline int
2844 eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev)
2845 {
2846         uint16_t nb_rxqs, nb_txqs;
2847         int count;
2848
2849         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2850         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2851
2852         count = RTE_NB_STATS;
2853         if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) {
2854                 count += nb_rxqs * RTE_NB_RXQ_STATS;
2855                 count += nb_txqs * RTE_NB_TXQ_STATS;
2856         }
2857
2858         return count;
2859 }
2860
2861 static int
2862 eth_dev_get_xstats_count(uint16_t port_id)
2863 {
2864         struct rte_eth_dev *dev;
2865         int count;
2866
2867         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2868         dev = &rte_eth_devices[port_id];
2869         if (dev->dev_ops->xstats_get_names_by_id != NULL) {
2870                 count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
2871                                 NULL, 0);
2872                 if (count < 0)
2873                         return eth_err(port_id, count);
2874         }
2875         if (dev->dev_ops->xstats_get_names != NULL) {
2876                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
2877                 if (count < 0)
2878                         return eth_err(port_id, count);
2879         } else
2880                 count = 0;
2881
2882
2883         count += eth_dev_get_xstats_basic_count(dev);
2884
2885         return count;
2886 }
2887
2888 int
2889 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
2890                 uint64_t *id)
2891 {
2892         int cnt_xstats, idx_xstat;
2893
2894         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2895
2896         if (xstat_name == NULL) {
2897                 RTE_ETHDEV_LOG(ERR,
2898                         "Cannot get ethdev port %u xstats ID from NULL xstat name\n",
2899                         port_id);
2900                 return -EINVAL;
2901         }
2902
2903         if (id == NULL) {
2904                 RTE_ETHDEV_LOG(ERR,
2905                         "Cannot get ethdev port %u xstats ID to NULL\n",
2906                         port_id);
2907                 return -EINVAL;
2908         }
2909
2910         /* Get count */
2911         cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
2912         if (cnt_xstats < 0) {
2913                 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n");
2914                 return -ENODEV;
2915         }
2916
2917         /* Get id-name lookup table */
2918         struct rte_eth_xstat_name xstats_names[cnt_xstats];
2919
2920         if (cnt_xstats != rte_eth_xstats_get_names_by_id(
2921                         port_id, xstats_names, cnt_xstats, NULL)) {
2922                 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n");
2923                 return -1;
2924         }
2925
2926         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
2927                 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
2928                         *id = idx_xstat;
2929                         return 0;
2930                 }
2931         }
2932
2933         return -EINVAL;
2934 }
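
/*
 * Usage sketch: resolve an xstat name to its id once, then fetch just
 * that value on the fast path. The statistic name is illustrative and
 * PMD-dependent.
 *
 *	uint64_t id, value;
 *
 *	if (rte_eth_xstats_get_id_by_name(port_id, "rx_good_packets", &id) == 0 &&
 *	    rte_eth_xstats_get_by_id(port_id, &id, &value, 1) == 1)
 *		printf("rx_good_packets=%" PRIu64 "\n", value);
 */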
2935
2936 /* retrieve basic stats names */
2937 static int
2938 eth_basic_stats_get_names(struct rte_eth_dev *dev,
2939         struct rte_eth_xstat_name *xstats_names)
2940 {
2941         int cnt_used_entries = 0;
2942         uint32_t idx, id_queue;
2943         uint16_t num_q;
2944
2945         for (idx = 0; idx < RTE_NB_STATS; idx++) {
2946                 strlcpy(xstats_names[cnt_used_entries].name,
2947                         eth_dev_stats_strings[idx].name,
2948                         sizeof(xstats_names[0].name));
2949                 cnt_used_entries++;
2950         }
2951
2952         if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
2953                 return cnt_used_entries;
2954
2955         num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2956         for (id_queue = 0; id_queue < num_q; id_queue++) {
2957                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
2958                         snprintf(xstats_names[cnt_used_entries].name,
2959                                 sizeof(xstats_names[0].name),
2960                                 "rx_q%u_%s",
2961                                 id_queue, eth_dev_rxq_stats_strings[idx].name);
2962                         cnt_used_entries++;
2963                 }
2965         }
2966         num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2967         for (id_queue = 0; id_queue < num_q; id_queue++) {
2968                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
2969                         snprintf(xstats_names[cnt_used_entries].name,
2970                                 sizeof(xstats_names[0].name),
2971                                 "tx_q%u_%s",
2972                                 id_queue, eth_dev_txq_stats_strings[idx].name);
2973                         cnt_used_entries++;
2974                 }
2975         }
2976         return cnt_used_entries;
2977 }
2978
2979 /* retrieve ethdev extended statistics names */
2980 int
2981 rte_eth_xstats_get_names_by_id(uint16_t port_id,
2982         struct rte_eth_xstat_name *xstats_names, unsigned int size,
2983         uint64_t *ids)
2984 {
2985         struct rte_eth_xstat_name *xstats_names_copy;
2986         unsigned int no_basic_stat_requested = 1;
2987         unsigned int no_ext_stat_requested = 1;
2988         unsigned int expected_entries;
2989         unsigned int basic_count;
2990         struct rte_eth_dev *dev;
2991         unsigned int i;
2992         int ret;
2993
2994         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2995         dev = &rte_eth_devices[port_id];
2996
2997         basic_count = eth_dev_get_xstats_basic_count(dev);
2998         ret = eth_dev_get_xstats_count(port_id);
2999         if (ret < 0)
3000                 return ret;
3001         expected_entries = (unsigned int)ret;
3002
3003         /* Return max number of stats if no ids given */
3004         if (!ids) {
3005                 if (!xstats_names)
3006                         return expected_entries;
3007                 else if (size < expected_entries)
3008                         return expected_entries;
3009         }
3010
3011         if (ids && !xstats_names)
3012                 return -EINVAL;
3013
3014         if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
3015                 uint64_t ids_copy[size];
3016
3017                 for (i = 0; i < size; i++) {
3018                         if (ids[i] < basic_count) {
3019                                 no_basic_stat_requested = 0;
3020                                 break;
3021                         }
3022
3023                         /*
3024                          * Convert ids to the xstats ids the PMD knows;
3025                          * user-visible ids cover basic + extended stats.
3026                          */
3027                         ids_copy[i] = ids[i] - basic_count;
3028                 }
3029
3030                 if (no_basic_stat_requested)
3031                         return (*dev->dev_ops->xstats_get_names_by_id)(dev,
3032                                         xstats_names, ids_copy, size);
3033         }
3034
3035         /* Retrieve all stats */
3036         if (!ids) {
3037                 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
3038                                 expected_entries);
3039                 if (num_stats < 0 || num_stats > (int)expected_entries)
3040                         return num_stats;
3041                 else
3042                         return expected_entries;
3043         }
3044
3045         xstats_names_copy = calloc(expected_entries,
3046                 sizeof(struct rte_eth_xstat_name));
3047
3048         if (!xstats_names_copy) {
3049                 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n");
3050                 return -ENOMEM;
3051         }
3052
3053         if (ids) {
3054                 for (i = 0; i < size; i++) {
3055                         if (ids[i] >= basic_count) {
3056                                 no_ext_stat_requested = 0;
3057                                 break;
3058                         }
3059                 }
3060         }
3061
3062         /* Fill xstats_names_copy structure */
3063         if (ids && no_ext_stat_requested) {
3064                 eth_basic_stats_get_names(dev, xstats_names_copy);
3065         } else {
3066                 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
3067                         expected_entries);
3068                 if (ret < 0) {
3069                         free(xstats_names_copy);
3070                         return ret;
3071                 }
3072         }
3073
3074         /* Filter stats */
3075         for (i = 0; i < size; i++) {
3076                 if (ids[i] >= expected_entries) {
3077                         RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
3078                         free(xstats_names_copy);
3079                         return -1;
3080                 }
3081                 xstats_names[i] = xstats_names_copy[ids[i]];
3082         }
3083
3084         free(xstats_names_copy);
3085         return size;
3086 }
3087
3088 int
3089 rte_eth_xstats_get_names(uint16_t port_id,
3090         struct rte_eth_xstat_name *xstats_names,
3091         unsigned int size)
3092 {
3093         struct rte_eth_dev *dev;
3094         int cnt_used_entries;
3095         int cnt_expected_entries;
3096         int cnt_driver_entries;
3097
3098         cnt_expected_entries = eth_dev_get_xstats_count(port_id);
3099         if (xstats_names == NULL || cnt_expected_entries < 0 ||
3100                         (int)size < cnt_expected_entries)
3101                 return cnt_expected_entries;
3102
3103         /* port_id checked in eth_dev_get_xstats_count() */
3104         dev = &rte_eth_devices[port_id];
3105
3106         cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names);
3107
3108         if (dev->dev_ops->xstats_get_names != NULL) {
3109                 /* If there are any driver-specific xstats, append them
3110                  * to end of list.
3111                  */
3112                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
3113                         dev,
3114                         xstats_names + cnt_used_entries,
3115                         size - cnt_used_entries);
3116                 if (cnt_driver_entries < 0)
3117                         return eth_err(port_id, cnt_driver_entries);
3118                 cnt_used_entries += cnt_driver_entries;
3119         }
3120
3121         return cnt_used_entries;
3122 }
3123
3125 static int
3126 eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
3127 {
3128         struct rte_eth_dev *dev;
3129         struct rte_eth_stats eth_stats;
3130         unsigned int count = 0, i, q;
3131         uint64_t val, *stats_ptr;
3132         uint16_t nb_rxqs, nb_txqs;
3133         int ret;
3134
3135         ret = rte_eth_stats_get(port_id, &eth_stats);
3136         if (ret < 0)
3137                 return ret;
3138
3139         dev = &rte_eth_devices[port_id];
3140
3141         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3142         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3143
3144         /* global stats */
3145         for (i = 0; i < RTE_NB_STATS; i++) {
3146                 stats_ptr = RTE_PTR_ADD(&eth_stats,
3147                                         eth_dev_stats_strings[i].offset);
3148                 val = *stats_ptr;
3149                 xstats[count++].value = val;
3150         }
3151
3152         if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
3153                 return count;
3154
3155         /* per-rxq stats */
3156         for (q = 0; q < nb_rxqs; q++) {
3157                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
3158                         stats_ptr = RTE_PTR_ADD(&eth_stats,
3159                                         eth_dev_rxq_stats_strings[i].offset +
3160                                         q * sizeof(uint64_t));
3161                         val = *stats_ptr;
3162                         xstats[count++].value = val;
3163                 }
3164         }
3165
3166         /* per-txq stats */
3167         for (q = 0; q < nb_txqs; q++) {
3168                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
3169                         stats_ptr = RTE_PTR_ADD(&eth_stats,
3170                                         eth_dev_txq_stats_strings[i].offset +
3171                                         q * sizeof(uint64_t));
3172                         val = *stats_ptr;
3173                         xstats[count++].value = val;
3174                 }
3175         }
3176         return count;
3177 }
3178
3179 /* retrieve ethdev extended statistics */
3180 int
3181 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
3182                          uint64_t *values, unsigned int size)
3183 {
3184         unsigned int no_basic_stat_requested = 1;
3185         unsigned int no_ext_stat_requested = 1;
3186         unsigned int num_xstats_filled;
3187         unsigned int basic_count;
3188         uint16_t expected_entries;
3189         struct rte_eth_dev *dev;
3190         unsigned int i;
3191         int ret;
3192
3193         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3194         dev = &rte_eth_devices[port_id];
3195
3196         ret = eth_dev_get_xstats_count(port_id);
3197         if (ret < 0)
3198                 return ret;
3199         expected_entries = (uint16_t)ret;
3200         struct rte_eth_xstat xstats[expected_entries];
3201         basic_count = eth_dev_get_xstats_basic_count(dev);
3202
3203         /* Return max number of stats if no ids given */
3204         if (!ids) {
3205                 if (!values)
3206                         return expected_entries;
3207                 else if (size < expected_entries)
3208                         return expected_entries;
3209         }
3210
3211         if (ids && !values)
3212                 return -EINVAL;
3213
3214         if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
3216                 uint64_t ids_copy[size];
3217
3218                 for (i = 0; i < size; i++) {
3219                         if (ids[i] < basic_count) {
3220                                 no_basic_stat_requested = 0;
3221                                 break;
3222                         }
3223
3224                         /*
3225                          * Convert ids to the xstats ids the PMD knows;
3226                          * user-visible ids cover basic + extended stats.
3227                          */
3228                         ids_copy[i] = ids[i] - basic_count;
3229                 }
3230
3231                 if (no_basic_stat_requested)
3232                         return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
3233                                         values, size);
3234         }
3235
3236         if (ids) {
3237                 for (i = 0; i < size; i++) {
3238                         if (ids[i] >= basic_count) {
3239                                 no_ext_stat_requested = 0;
3240                                 break;
3241                         }
3242                 }
3243         }
3244
3245         /* Fill the xstats structure */
3246         if (ids && no_ext_stat_requested)
3247                 ret = eth_basic_stats_get(port_id, xstats);
3248         else
3249                 ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
3250
3251         if (ret < 0)
3252                 return ret;
3253         num_xstats_filled = (unsigned int)ret;
3254
3255         /* Return all stats */
3256         if (!ids) {
3257                 for (i = 0; i < num_xstats_filled; i++)
3258                         values[i] = xstats[i].value;
3259                 return expected_entries;
3260         }
3261
3262         /* Filter stats */
3263         for (i = 0; i < size; i++) {
3264                 if (ids[i] >= expected_entries) {
3265                         RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
3266                         return -1;
3267                 }
3268                 values[i] = xstats[ids[i]].value;
3269         }
3270         return size;
3271 }
3272
3273 int
3274 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
3275         unsigned int n)
3276 {
3277         struct rte_eth_dev *dev;
3278         unsigned int count = 0, i;
3279         signed int xcount = 0;
3280         uint16_t nb_rxqs, nb_txqs;
3281         int ret;
3282
3283         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3284         dev = &rte_eth_devices[port_id];
3285
3286         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3287         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3288
3289         /* Return generic statistics */
3290         count = RTE_NB_STATS;
3291         if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS)
3292                 count += (nb_rxqs * RTE_NB_RXQ_STATS) + (nb_txqs * RTE_NB_TXQ_STATS);
3293
3294         /* implemented by the driver */
3295         if (dev->dev_ops->xstats_get != NULL) {
3296                 /* Retrieve the xstats from the driver at the end of the
3297                  * xstats array.
3298                  */
3299                 xcount = (*dev->dev_ops->xstats_get)(dev,
3300                                      xstats ? xstats + count : NULL,
3301                                      (n > count) ? n - count : 0);
3302
3303                 if (xcount < 0)
3304                         return eth_err(port_id, xcount);
3305         }
3306
3307         if (n < count + xcount || xstats == NULL)
3308                 return count + xcount;
3309
3310         /* now fill the xstats structure */
3311         ret = eth_basic_stats_get(port_id, xstats);
3312         if (ret < 0)
3313                 return ret;
3314         count = ret;
3315
3316         for (i = 0; i < count; i++)
3317                 xstats[i].id = i;
3318         /* add an offset to driver-specific stats */
3319         for ( ; i < count + xcount; i++)
3320                 xstats[i].id += count;
3321
3322         return count + xcount;
3323 }
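
/*
 * Usage sketch: the common two-call pattern for a full xstats dump.
 * Calling with a NULL array first returns the required count; names[]
 * and xs[] then share one id space.
 *
 *	int i, n = rte_eth_xstats_get(port_id, NULL, 0);
 *	struct rte_eth_xstat *xs;
 *	struct rte_eth_xstat_name *names;
 *
 *	if (n <= 0)
 *		return;
 *	xs = malloc(n * sizeof(*xs));
 *	names = malloc(n * sizeof(*names));
 *	if (xs != NULL && names != NULL &&
 *	    rte_eth_xstats_get_names(port_id, names, n) == n &&
 *	    rte_eth_xstats_get(port_id, xs, n) == n)
 *		for (i = 0; i < n; i++)
 *			printf("%s: %" PRIu64 "\n",
 *				names[xs[i].id].name, xs[i].value);
 *	free(xs);
 *	free(names);
 */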
3324
3325 /* reset ethdev extended statistics */
3326 int
3327 rte_eth_xstats_reset(uint16_t port_id)
3328 {
3329         struct rte_eth_dev *dev;
3330
3331         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3332         dev = &rte_eth_devices[port_id];
3333
3334         /* implemented by the driver */
3335         if (dev->dev_ops->xstats_reset != NULL)
3336                 return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev));
3337
3338         /* fallback to default */
3339         return rte_eth_stats_reset(port_id);
3340 }
3341
3342 static int
3343 eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id,
3344                 uint8_t stat_idx, uint8_t is_rx)
3345 {
3346         struct rte_eth_dev *dev;
3347
3348         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3349         dev = &rte_eth_devices[port_id];
3350
3351         if (is_rx && (queue_id >= dev->data->nb_rx_queues))
3352                 return -EINVAL;
3353
3354         if (!is_rx && (queue_id >= dev->data->nb_tx_queues))
3355                 return -EINVAL;
3356
3357         if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
3358                 return -EINVAL;
3359
3360         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
3361         return (*dev->dev_ops->queue_stats_mapping_set)(dev, queue_id, stat_idx, is_rx);
3362 }
3363
3364 int
3365 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
3366                 uint8_t stat_idx)
3367 {
3368         return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id,
3369                                                 tx_queue_id,
3370                                                 stat_idx, STAT_QMAP_TX));
3371 }
3372
3373 int
3374 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
3375                 uint8_t stat_idx)
3376 {
3377         return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id,
3378                                                 rx_queue_id,
3379                                                 stat_idx, STAT_QMAP_RX));
3380 }
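
/*
 * Usage sketch: pin Rx queue 3 of a port to per-queue counter slot 0 so
 * that its traffic is accounted in rte_eth_stats.q_ipackets[0] and
 * q_ibytes[0]. Support is device-specific, so -ENOTSUP must be handled;
 * the queue and slot numbers are illustrative.
 *
 *	int rc = rte_eth_dev_set_rx_queue_stats_mapping(port_id, 3, 0);
 */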
3381
3382 int
3383 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
3384 {
3385         struct rte_eth_dev *dev;
3386
3387         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3388         dev = &rte_eth_devices[port_id];
3389
3390         if (fw_version == NULL && fw_size > 0) {
3391                 RTE_ETHDEV_LOG(ERR,
3392                         "Cannot get ethdev port %u FW version to NULL when string size is non-zero\n",
3393                         port_id);
3394                 return -EINVAL;
3395         }
3396
3397         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
3398         return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
3399                                                         fw_version, fw_size));
3400 }
3401
3402 int
3403 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
3404 {
3405         struct rte_eth_dev *dev;
3406         const struct rte_eth_desc_lim lim = {
3407                 .nb_max = UINT16_MAX,
3408                 .nb_min = 0,
3409                 .nb_align = 1,
3410                 .nb_seg_max = UINT16_MAX,
3411                 .nb_mtu_seg_max = UINT16_MAX,
3412         };
3413         int diag;
3414
3415         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3416         dev = &rte_eth_devices[port_id];
3417
3418         if (dev_info == NULL) {
3419                 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u info to NULL\n",
3420                         port_id);
3421                 return -EINVAL;
3422         }
3423
3424         /*
3425          * Init dev_info before port_id check since caller does not have
3426          * return status and does not know if get is successful or not.
3427          */
3428         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3429         dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
3430
3431         dev_info->rx_desc_lim = lim;
3432         dev_info->tx_desc_lim = lim;
3433         dev_info->device = dev->device;
3434         dev_info->min_mtu = RTE_ETHER_MIN_MTU;
3435         dev_info->max_mtu = UINT16_MAX;
3436
3437         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
3438         diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info);
3439         if (diag != 0) {
3440                 /* Cleanup already filled in device information */
3441                 memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3442                 return eth_err(port_id, diag);
3443         }
3444
3445         /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */
3446         dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues,
3447                         RTE_MAX_QUEUES_PER_PORT);
3448         dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues,
3449                         RTE_MAX_QUEUES_PER_PORT);
3450
3451         dev_info->driver_name = dev->device->driver->name;
3452         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
3453         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
3454
3455         dev_info->dev_flags = &dev->data->dev_flags;
3456
3457         return 0;
3458 }
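
/*
 * Usage sketch: query device information; the fields printed are
 * illustrative.
 *
 *	struct rte_eth_dev_info info;
 *
 *	if (rte_eth_dev_info_get(port_id, &info) == 0)
 *		printf("%s: up to %u Rx / %u Tx queues\n",
 *			info.driver_name, info.max_rx_queues,
 *			info.max_tx_queues);
 */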
3459
3460 int
3461 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3462                                  uint32_t *ptypes, int num)
3463 {
3464         int i, j;
3465         struct rte_eth_dev *dev;
3466         const uint32_t *all_ptypes;
3467
3468         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3469         dev = &rte_eth_devices[port_id];
3470
3471         if (ptypes == NULL && num > 0) {
3472                 RTE_ETHDEV_LOG(ERR,
3473                         "Cannot get ethdev port %u supported packet types to NULL when array size is non-zero\n",
3474                         port_id);
3475                 return -EINVAL;
3476         }
3477
3478         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
3479         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3480
3481         if (!all_ptypes)
3482                 return 0;
3483
3484         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
3485                 if (all_ptypes[i] & ptype_mask) {
3486                         if (j < num)
3487                                 ptypes[j] = all_ptypes[i];
3488                         j++;
3489                 }
3490
3491         return j;
3492 }
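
/*
 * Usage sketch: list the L4 packet types a PMD can recognize. The array
 * size is illustrative; a first call with num == 0 would return the
 * exact count instead.
 *
 *	uint32_t ptypes[8];
 *	int i, n;
 *
 *	n = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_L4_MASK,
 *			ptypes, RTE_DIM(ptypes));
 *	for (i = 0; i < n && i < (int)RTE_DIM(ptypes); i++)
 *		printf("ptype 0x%08x\n", ptypes[i]);
 */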
3493
3494 int
3495 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3496                                  uint32_t *set_ptypes, unsigned int num)
3497 {
3498         const uint32_t valid_ptype_masks[] = {
3499                 RTE_PTYPE_L2_MASK,
3500                 RTE_PTYPE_L3_MASK,
3501                 RTE_PTYPE_L4_MASK,
3502                 RTE_PTYPE_TUNNEL_MASK,
3503                 RTE_PTYPE_INNER_L2_MASK,
3504                 RTE_PTYPE_INNER_L3_MASK,
3505                 RTE_PTYPE_INNER_L4_MASK,
3506         };
3507         const uint32_t *all_ptypes;
3508         struct rte_eth_dev *dev;
3509         uint32_t unused_mask;
3510         unsigned int i, j;
3511         int ret;
3512
3513         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3514         dev = &rte_eth_devices[port_id];
3515
3516         if (num > 0 && set_ptypes == NULL) {
3517                 RTE_ETHDEV_LOG(ERR,
3518                         "Cannot get ethdev port %u set packet types to NULL when array size is non-zero\n",
3519                         port_id);
3520                 return -EINVAL;
3521         }
3522
3523         if (*dev->dev_ops->dev_supported_ptypes_get == NULL ||
3524                         *dev->dev_ops->dev_ptypes_set == NULL) {
3525                 ret = 0;
3526                 goto ptype_unknown;
3527         }
3528
3529         if (ptype_mask == 0) {
3530                 ret = (*dev->dev_ops->dev_ptypes_set)(dev,
3531                                 ptype_mask);
3532                 goto ptype_unknown;
3533         }
3534
3535         unused_mask = ptype_mask;
3536         for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) {
3537                 uint32_t mask = ptype_mask & valid_ptype_masks[i];
3538                 if (mask && mask != valid_ptype_masks[i]) {
3539                         ret = -EINVAL;
3540                         goto ptype_unknown;
3541                 }
3542                 unused_mask &= ~valid_ptype_masks[i];
3543         }
3544
3545         if (unused_mask) {
3546                 ret = -EINVAL;
3547                 goto ptype_unknown;
3548         }
3549
3550         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3551         if (all_ptypes == NULL) {
3552                 ret = 0;
3553                 goto ptype_unknown;
3554         }
3555
3556         /*
3557          * Accommodate as many set_ptypes as possible. If the supplied
3558          * set_ptypes array is insufficient, fill it partially.
3559          */
3560         for (i = 0, j = 0; set_ptypes != NULL &&
3561                                 (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) {
3562                 if (ptype_mask & all_ptypes[i]) {
3563                 if (j + 1 < num) {
3564                                 set_ptypes[j] = all_ptypes[i];
3565                                 j++;
3566                                 continue;
3567                         }
3568                         break;
3569                 }
3570         }
3571
3572         if (set_ptypes != NULL && j < num)
3573                 set_ptypes[j] = RTE_PTYPE_UNKNOWN;
3574
3575         return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask);
3576
3577 ptype_unknown:
3578         if (num > 0)
3579                 set_ptypes[0] = RTE_PTYPE_UNKNOWN;
3580
3581         return ret;
3582 }
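
/*
 * Usage sketch: a zero mask (RTE_PTYPE_UNKNOWN) asks the PMD to stop
 * classifying packet types altogether, which can save Rx cycles when
 * the application parses headers itself.
 *
 *	rte_eth_dev_set_ptypes(port_id, RTE_PTYPE_UNKNOWN, NULL, 0);
 */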
3583
3584 int
3585 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
3586 {
3587         struct rte_eth_dev *dev;
3588
3589         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3590         dev = &rte_eth_devices[port_id];
3591
3592         if (mac_addr == NULL) {
3593                 RTE_ETHDEV_LOG(ERR,
3594                         "Cannot get ethdev port %u MAC address to NULL\n",
3595                         port_id);
3596                 return -EINVAL;
3597         }
3598
3599         rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
3600
3601         return 0;
3602 }
3603
3604 int
3605 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
3606 {
3607         struct rte_eth_dev *dev;
3608
3609         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3610         dev = &rte_eth_devices[port_id];
3611
3612         if (mtu == NULL) {
3613                 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u MTU to NULL\n",
3614                         port_id);
3615                 return -EINVAL;
3616         }
3617
3618         *mtu = dev->data->mtu;
3619         return 0;
3620 }
3621
3622 int
3623 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
3624 {
3625         int ret;
3626         struct rte_eth_dev_info dev_info;
3627         struct rte_eth_dev *dev;
3628
3629         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3630         dev = &rte_eth_devices[port_id];
3631         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
3632
3633         /*
3634          * Check if the device supports dev_infos_get, if it does not
3635          * skip min_mtu/max_mtu validation here as this requires values
3636          * that are populated within the call to rte_eth_dev_info_get()
3637          * which relies on dev->dev_ops->dev_infos_get.
3638          */
3639         if (*dev->dev_ops->dev_infos_get != NULL) {
3640                 ret = rte_eth_dev_info_get(port_id, &dev_info);
3641                 if (ret != 0)
3642                         return ret;
3643
3644                 if (mtu < dev_info.min_mtu || mtu > dev_info.max_mtu)
3645                         return -EINVAL;
3646         }
3647
3648         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
3649         if (!ret)
3650                 dev->data->mtu = mtu;
3651
3652         return eth_err(port_id, ret);
3653 }
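
/*
 * Usage sketch: raise the MTU for jumbo frames while staying inside the
 * limits reported by rte_eth_dev_info_get(); 9000 is an illustrative
 * value.
 *
 *	struct rte_eth_dev_info info;
 *
 *	if (rte_eth_dev_info_get(port_id, &info) == 0 &&
 *	    info.max_mtu >= 9000)
 *		rte_eth_dev_set_mtu(port_id, 9000);
 */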
3654
3655 int
3656 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
3657 {
3658         struct rte_eth_dev *dev;
3659         int ret;
3660
3661         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3662         dev = &rte_eth_devices[port_id];
3663
3664         if (!(dev->data->dev_conf.rxmode.offloads &
3665               DEV_RX_OFFLOAD_VLAN_FILTER)) {
3666                 RTE_ETHDEV_LOG(ERR, "Port %u: vlan-filtering disabled\n",
3667                         port_id);
3668                 return -ENOSYS;
3669         }
3670
3671         if (vlan_id > 4095) {
3672                 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n",
3673                         port_id, vlan_id);
3674                 return -EINVAL;
3675         }
3676         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
3677
3678         ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
3679         if (ret == 0) {
3680                 struct rte_vlan_filter_conf *vfc;
3681                 int vidx;
3682                 int vbit;
3683
3684                 vfc = &dev->data->vlan_filter_conf;
3685                 vidx = vlan_id / 64;
3686                 vbit = vlan_id % 64;
3687
3688                 if (on)
3689                         vfc->ids[vidx] |= UINT64_C(1) << vbit;
3690                 else
3691                         vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
3692         }
3693
3694         return eth_err(port_id, ret);
3695 }
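
/*
 * Usage sketch: admit VLAN 100 on a port whose Rx configuration already
 * enables DEV_RX_OFFLOAD_VLAN_FILTER; the VLAN id is illustrative.
 *
 *	int rc = rte_eth_dev_vlan_filter(port_id, 100, 1);
 */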
3696
3697 int
3698 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3699                                     int on)
3700 {
3701         struct rte_eth_dev *dev;
3702
3703         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3704         dev = &rte_eth_devices[port_id];
3705
3706         if (rx_queue_id >= dev->data->nb_rx_queues) {
3707                 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id);
3708                 return -EINVAL;
3709         }
3710
3711         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
3712         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
3713
3714         return 0;
3715 }
3716
3717 int
3718 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
3719                                 enum rte_vlan_type vlan_type,
3720                                 uint16_t tpid)
3721 {
3722         struct rte_eth_dev *dev;
3723
3724         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3725         dev = &rte_eth_devices[port_id];
3726
3727         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
3728         return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
3729                                                                tpid));
3730 }
3731
3732 int
3733 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
3734 {
3735         struct rte_eth_dev_info dev_info;
3736         struct rte_eth_dev *dev;
3737         int ret = 0;
3738         int mask = 0;
3739         int cur, org = 0;
3740         uint64_t orig_offloads;
3741         uint64_t dev_offloads;
3742         uint64_t new_offloads;
3743
3744         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3745         dev = &rte_eth_devices[port_id];
3746
3747         /* save original values in case of failure */
3748         orig_offloads = dev->data->dev_conf.rxmode.offloads;
3749         dev_offloads = orig_offloads;
3750
3751         /* check which option changed by application */
3752         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
3753         org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
3754         if (cur != org) {
3755                 if (cur)
3756                         dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
3757                 else
3758                         dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
3759                 mask |= ETH_VLAN_STRIP_MASK;
3760         }
3761
3762         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
3763         org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
3764         if (cur != org) {
3765                 if (cur)
3766                         dev_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3767                 else
3768                         dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
3769                 mask |= ETH_VLAN_FILTER_MASK;
3770         }
3771
3772         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
3773         org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND);
3774         if (cur != org) {
3775                 if (cur)
3776                         dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
3777                 else
3778                         dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
3779                 mask |= ETH_VLAN_EXTEND_MASK;
3780         }
3781
3782         cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD);
3783         org = !!(dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP);
3784         if (cur != org) {
3785                 if (cur)
3786                         dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
3787                 else
3788                         dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
3789                 mask |= ETH_QINQ_STRIP_MASK;
3790         }
3791
3792         /* no change */
3793         if (mask == 0)
3794                 return ret;
3795
3796         ret = rte_eth_dev_info_get(port_id, &dev_info);
3797         if (ret != 0)
3798                 return ret;
3799
3800         /* Rx VLAN offloading must be within its device capabilities */
3801         if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) {
3802                 new_offloads = dev_offloads & ~orig_offloads;
3803                 RTE_ETHDEV_LOG(ERR,
3804                         "Ethdev port_id=%u requested newly added VLAN offloads "
3805                         "0x%" PRIx64 " must be within Rx offload capabilities "
3806                         "0x%" PRIx64 " in %s()\n",
3807                         port_id, new_offloads, dev_info.rx_offload_capa,
3808                         __func__);
3809                 return -EINVAL;
3810         }
3811
3812         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
3813         dev->data->dev_conf.rxmode.offloads = dev_offloads;
3814         ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
3815         if (ret) {
3816                 /* hit an error, restore original values */
3817                 dev->data->dev_conf.rxmode.offloads = orig_offloads;
3818         }
3819
3820         return eth_err(port_id, ret);
3821 }
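
/*
 * Usage sketch: read-modify-write of the VLAN offload flags, enabling
 * stripping while leaving the other bits untouched.
 *
 *	int mask = rte_eth_dev_get_vlan_offload(port_id);
 *
 *	if (mask >= 0)
 *		rte_eth_dev_set_vlan_offload(port_id,
 *				mask | ETH_VLAN_STRIP_OFFLOAD);
 */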
3822
3823 int
3824 rte_eth_dev_get_vlan_offload(uint16_t port_id)
3825 {
3826         struct rte_eth_dev *dev;
3827         uint64_t *dev_offloads;
3828         int ret = 0;
3829
3830         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3831         dev = &rte_eth_devices[port_id];
3832         dev_offloads = &dev->data->dev_conf.rxmode.offloads;
3833
3834         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
3835                 ret |= ETH_VLAN_STRIP_OFFLOAD;
3836
3837         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
3838                 ret |= ETH_VLAN_FILTER_OFFLOAD;
3839
3840         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
3841                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
3842
3843         if (*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
3844                 ret |= ETH_QINQ_STRIP_OFFLOAD;
3845
3846         return ret;
3847 }
3848
3849 int
3850 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
3851 {
3852         struct rte_eth_dev *dev;
3853
3854         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3855         dev = &rte_eth_devices[port_id];
3856
3857         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
3858         return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
3859 }
3860
3861 int
3862 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3863 {
3864         struct rte_eth_dev *dev;
3865
3866         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3867         dev = &rte_eth_devices[port_id];
3868
3869         if (fc_conf == NULL) {
3870                 RTE_ETHDEV_LOG(ERR,
3871                         "Cannot get ethdev port %u flow control config to NULL\n",
3872                         port_id);
3873                 return -EINVAL;
3874         }
3875
3876         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
3877         memset(fc_conf, 0, sizeof(*fc_conf));
3878         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
3879 }
3880
3881 int
3882 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3883 {
3884         struct rte_eth_dev *dev;
3885
3886         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3887         dev = &rte_eth_devices[port_id];
3888
3889         if (fc_conf == NULL) {
3890                 RTE_ETHDEV_LOG(ERR,
3891                         "Cannot set ethdev port %u flow control from NULL config\n",
3892                         port_id);
3893                 return -EINVAL;
3894         }
3895
3896         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
3897                 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n");
3898                 return -EINVAL;
3899         }
3900
3901         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
3902         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
3903 }
3904
3905 int
3906 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
3907                                    struct rte_eth_pfc_conf *pfc_conf)
3908 {
3909         struct rte_eth_dev *dev;
3910
3911         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3912         dev = &rte_eth_devices[port_id];
3913
3914         if (pfc_conf == NULL) {
3915                 RTE_ETHDEV_LOG(ERR,
3916                         "Cannot set ethdev port %u priority flow control from NULL config\n",
3917                         port_id);
3918                 return -EINVAL;
3919         }
3920
3921         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
3922                 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
3923                 return -EINVAL;
3924         }
3925
3926         /* High/low water mark validation is device-specific */
3927         if (*dev->dev_ops->priority_flow_ctrl_set)
3928                 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
3929                                         (dev, pfc_conf));
3930         return -ENOTSUP;
3931 }
3932
3933 static int
3934 eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
3935                         uint16_t reta_size)
3936 {
3937         uint16_t i, num;
3938
3939         num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
3940         for (i = 0; i < num; i++) {
3941                 if (reta_conf[i].mask)
3942                         return 0;
3943         }
3944
3945         return -EINVAL;
3946 }
3947
3948 static int
3949 eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
3950                          uint16_t reta_size,
3951                          uint16_t max_rxq)
3952 {
3953         uint16_t i, idx, shift;
3954
3955         if (max_rxq == 0) {
3956                 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n");
3957                 return -EINVAL;
3958         }
3959
3960         for (i = 0; i < reta_size; i++) {
3961                 idx = i / RTE_RETA_GROUP_SIZE;
3962                 shift = i % RTE_RETA_GROUP_SIZE;
3963                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
3964                         (reta_conf[idx].reta[shift] >= max_rxq)) {
3965                         RTE_ETHDEV_LOG(ERR,
3966                                 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n",
3967                                 idx, shift,
3968                                 reta_conf[idx].reta[shift], max_rxq);
3969                         return -EINVAL;
3970                 }
3971         }
3972
3973         return 0;
3974 }
3975
3976 int
3977 rte_eth_dev_rss_reta_update(uint16_t port_id,
3978                             struct rte_eth_rss_reta_entry64 *reta_conf,
3979                             uint16_t reta_size)
3980 {
3981         struct rte_eth_dev *dev;
3982         int ret;
3983
3984         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3985         dev = &rte_eth_devices[port_id];
3986
3987         if (reta_conf == NULL) {
3988                 RTE_ETHDEV_LOG(ERR,
3989                         "Cannot update ethdev port %u RSS RETA to NULL\n",
3990                         port_id);
3991                 return -EINVAL;
3992         }
3993
3994         if (reta_size == 0) {
3995                 RTE_ETHDEV_LOG(ERR,
3996                         "Cannot update ethdev port %u RSS RETA with zero size\n",
3997                         port_id);
3998                 return -EINVAL;
3999         }
4000
4001         /* Check mask bits */
4002         ret = eth_check_reta_mask(reta_conf, reta_size);
4003         if (ret < 0)
4004                 return ret;
4005
4006         /* Check entry value */
4007         ret = eth_check_reta_entry(reta_conf, reta_size,
4008                                 dev->data->nb_rx_queues);
4009         if (ret < 0)
4010                 return ret;
4011
4012         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
4013         return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
4014                                                              reta_size));
4015 }
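
/*
 * Usage sketch: spread a redirection table round-robin over 4 Rx
 * queues. The 128-entry table size is illustrative; a real application
 * should use dev_info.reta_size. Every entry to be written needs its
 * bit set in the per-group mask.
 *
 *	struct rte_eth_rss_reta_entry64 conf[128 / RTE_RETA_GROUP_SIZE];
 *	uint16_t i;
 *
 *	memset(conf, 0, sizeof(conf));
 *	for (i = 0; i < 128; i++) {
 *		conf[i / RTE_RETA_GROUP_SIZE].mask |=
 *				UINT64_C(1) << (i % RTE_RETA_GROUP_SIZE);
 *		conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *				i % 4;
 *	}
 *	rte_eth_dev_rss_reta_update(port_id, conf, 128);
 */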
4016
4017 int
4018 rte_eth_dev_rss_reta_query(uint16_t port_id,
4019                            struct rte_eth_rss_reta_entry64 *reta_conf,
4020                            uint16_t reta_size)
4021 {
4022         struct rte_eth_dev *dev;
4023         int ret;
4024
4025         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4026         dev = &rte_eth_devices[port_id];
4027
4028         if (reta_conf == NULL) {
4029                 RTE_ETHDEV_LOG(ERR,
4030                         "Cannot query ethdev port %u RSS RETA from NULL config\n",
4031                         port_id);
4032                 return -EINVAL;
4033         }
4034
4035         /* Check mask bits */
4036         ret = eth_check_reta_mask(reta_conf, reta_size);
4037         if (ret < 0)
4038                 return ret;
4039
4040         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
4041         return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
4042                                                             reta_size));
4043 }
4044
4045 int
4046 rte_eth_dev_rss_hash_update(uint16_t port_id,
4047                             struct rte_eth_rss_conf *rss_conf)
4048 {
4049         struct rte_eth_dev *dev;
4050         struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
4051         int ret;
4052
4053         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4054         dev = &rte_eth_devices[port_id];
4055
4056         if (rss_conf == NULL) {
4057                 RTE_ETHDEV_LOG(ERR,
4058                         "Cannot update ethdev port %u RSS hash from NULL config\n",
4059                         port_id);
4060                 return -EINVAL;
4061         }
4062
4063         ret = rte_eth_dev_info_get(port_id, &dev_info);
4064         if (ret != 0)
4065                 return ret;
4066
4067         rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf);
4068         if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
4069             dev_info.flow_type_rss_offloads) {
4070                 RTE_ETHDEV_LOG(ERR,
4071                         "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
4072                         port_id, rss_conf->rss_hf,
4073                         dev_info.flow_type_rss_offloads);
4074                 return -EINVAL;
4075         }
4076         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
4077         return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
4078                                                                  rss_conf));
4079 }
4080
4081 int
4082 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
4083                               struct rte_eth_rss_conf *rss_conf)
4084 {
4085         struct rte_eth_dev *dev;
4086
4087         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4088         dev = &rte_eth_devices[port_id];
4089
4090         if (rss_conf == NULL) {
4091                 RTE_ETHDEV_LOG(ERR,
4092                         "Cannot get ethdev port %u RSS hash config to NULL\n",
4093                         port_id);
4094                 return -EINVAL;
4095         }
4096
4097         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
4098         return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
4099                                                                    rss_conf));
4100 }
4101
4102 int
4103 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
4104                                 struct rte_eth_udp_tunnel *udp_tunnel)
4105 {
4106         struct rte_eth_dev *dev;
4107
4108         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4109         dev = &rte_eth_devices[port_id];
4110
4111         if (udp_tunnel == NULL) {
4112                 RTE_ETHDEV_LOG(ERR,
4113                         "Cannot add ethdev port %u UDP tunnel port from NULL UDP tunnel\n",
4114                         port_id);
4115                 return -EINVAL;
4116         }
4117
4118         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
4119                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4120                 return -EINVAL;
4121         }
4122
4123         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
4124         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
4125                                                                 udp_tunnel));
4126 }
4127
4128 int
4129 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
4130                                    struct rte_eth_udp_tunnel *udp_tunnel)
4131 {
4132         struct rte_eth_dev *dev;
4133
4134         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4135         dev = &rte_eth_devices[port_id];
4136
4137         if (udp_tunnel == NULL) {
4138                 RTE_ETHDEV_LOG(ERR,
4139                         "Cannot delete ethdev port %u UDP tunnel port from NULL UDP tunnel\n",
4140                         port_id);
4141                 return -EINVAL;
4142         }
4143
4144         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
4145                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4146                 return -EINVAL;
4147         }
4148
4149         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
4150         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
4151                                                                 udp_tunnel));
4152 }
4153
4154 int
4155 rte_eth_led_on(uint16_t port_id)
4156 {
4157         struct rte_eth_dev *dev;
4158
4159         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4160         dev = &rte_eth_devices[port_id];
4161
4162         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
4163         return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
4164 }
4165
4166 int
4167 rte_eth_led_off(uint16_t port_id)
4168 {
4169         struct rte_eth_dev *dev;
4170
4171         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4172         dev = &rte_eth_devices[port_id];
4173
4174         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
4175         return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
4176 }
4177
4178 int
4179 rte_eth_fec_get_capability(uint16_t port_id,
4180                            struct rte_eth_fec_capa *speed_fec_capa,
4181                            unsigned int num)
4182 {
4183         struct rte_eth_dev *dev;
4184         int ret;
4185
4186         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4187         dev = &rte_eth_devices[port_id];
4188
4189         if (speed_fec_capa == NULL && num > 0) {
4190                 RTE_ETHDEV_LOG(ERR,
4191                         "Cannot get ethdev port %u FEC capability to NULL when array size is non-zero\n",
4192                         port_id);
4193                 return -EINVAL;
4194         }
4195
4196         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get_capability, -ENOTSUP);
4197         ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num);
4198
4199         return ret;
4200 }
4201
4202 int
4203 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa)
4204 {
4205         struct rte_eth_dev *dev;
4206
4207         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4208         dev = &rte_eth_devices[port_id];
4209
4210         if (fec_capa == NULL) {
4211                 RTE_ETHDEV_LOG(ERR,
4212                         "Cannot get ethdev port %u current FEC mode to NULL\n",
4213                         port_id);
4214                 return -EINVAL;
4215         }
4216
4217         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get, -ENOTSUP);
4218         return eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa));
4219 }
4220
4221 int
4222 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa)
4223 {
4224         struct rte_eth_dev *dev;
4225
4226         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4227         dev = &rte_eth_devices[port_id];
4228
4229         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_set, -ENOTSUP);
4230         return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa));
4231 }
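
/*
 * Usage sketch: request automatic FEC negotiation; a capability-aware
 * application would first check the modes reported by
 * rte_eth_fec_get_capability() for the current link speed.
 *
 *	rte_eth_fec_set(port_id, RTE_ETH_FEC_MODE_CAPA_MASK(AUTO));
 */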
4232
4233 /*
4234  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
4235  * an empty spot.
4236  */
4237 static int
4238 eth_dev_get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
4239 {
4240         struct rte_eth_dev_info dev_info;
4241         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4242         unsigned i;
4243         int ret;
4244
4245         ret = rte_eth_dev_info_get(port_id, &dev_info);
4246         if (ret != 0)
4247                 return -1;
4248
4249         for (i = 0; i < dev_info.max_mac_addrs; i++)
4250                 if (memcmp(addr, &dev->data->mac_addrs[i],
4251                                 RTE_ETHER_ADDR_LEN) == 0)
4252                         return i;
4253
4254         return -1;
4255 }
4256
4257 static const struct rte_ether_addr null_mac_addr;
4258
4259 int
4260 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
4261                         uint32_t pool)
4262 {
4263         struct rte_eth_dev *dev;
4264         int index;
4265         uint64_t pool_mask;
4266         int ret;
4267
4268         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4269         dev = &rte_eth_devices[port_id];
4270
4271         if (addr == NULL) {
4272                 RTE_ETHDEV_LOG(ERR,
4273                         "Cannot add ethdev port %u MAC address from NULL address\n",
4274                         port_id);
4275                 return -EINVAL;
4276         }
4277
4278         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
4279
4280         if (rte_is_zero_ether_addr(addr)) {
4281                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
4282                         port_id);
4283                 return -EINVAL;
4284         }
4285         if (pool >= ETH_64_POOLS) {
4286                 RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", ETH_64_POOLS - 1);
4287                 return -EINVAL;
4288         }
4289
4290         index = eth_dev_get_mac_addr_index(port_id, addr);
4291         if (index < 0) {
4292                 index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr);
4293                 if (index < 0) {
4294                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
4295                                 port_id);
4296                         return -ENOSPC;
4297                 }
4298         } else {
4299                 pool_mask = dev->data->mac_pool_sel[index];
4300
4301                 /* If both the MAC address and pool are already there, do nothing */
4302                 if (pool_mask & (1ULL << pool))
4303                         return 0;
4304         }
4305
4306         /* Update NIC */
4307         ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
4308
4309         if (ret == 0) {
4310                 /* Update address in NIC data structure */
4311                 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]);
4312
4313                 /* Update pool bitmap in NIC data structure */
4314                 dev->data->mac_pool_sel[index] |= (1ULL << pool);
4315         }
4316
4317         return eth_err(port_id, ret);
4318 }
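
/*
 * Usage sketch: add a locally administered secondary MAC address to
 * pool 0; the address bytes are illustrative.
 *
 *	struct rte_ether_addr mac = {
 *		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 } };
 *
 *	rte_eth_dev_mac_addr_add(port_id, &mac, 0);
 */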
4319
4320 int
4321 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr)
4322 {
4323         struct rte_eth_dev *dev;
4324         int index;
4325
4326         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4327         dev = &rte_eth_devices[port_id];
4328
4329         if (addr == NULL) {
4330                 RTE_ETHDEV_LOG(ERR,
4331                         "Cannot remove ethdev port %u MAC address from NULL address\n",
4332                         port_id);
4333                 return -EINVAL;
4334         }
4335
4336         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
4337
4338         index = eth_dev_get_mac_addr_index(port_id, addr);
4339         if (index == 0) {
4340                 RTE_ETHDEV_LOG(ERR,
4341                         "Port %u: Cannot remove default MAC address\n",
4342                         port_id);
4343                 return -EADDRINUSE;
4344         } else if (index < 0)
4345                 return 0;  /* Do nothing if address wasn't found */
4346
4347         /* Update NIC */
4348         (*dev->dev_ops->mac_addr_remove)(dev, index);
4349
4350         /* Update address in NIC data structure */
4351         rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
4352
4353         /* reset pool bitmap */
4354         dev->data->mac_pool_sel[index] = 0;
4355
4356         return 0;
4357 }
4358
4359 int
4360 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr)
4361 {
4362         struct rte_eth_dev *dev;
4363         int ret;
4364
4365         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4366         dev = &rte_eth_devices[port_id];
4367
4368         if (addr == NULL) {
4369                 RTE_ETHDEV_LOG(ERR,
4370                         "Cannot set ethdev port %u default MAC address from NULL address\n",
4371                         port_id);
4372                 return -EINVAL;
4373         }
4374
4375         if (!rte_is_valid_assigned_ether_addr(addr))
4376                 return -EINVAL;
4377
4378         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
4379
4380         ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
4381         if (ret < 0)
4382                 return ret;
4383
4384         /* Update default address in NIC data structure */
4385         rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
4386
4387         return 0;
4388 }
4389
4390
4391 /*
4392  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
4393  * an empty spot.
4394  */
4395 static int
4396 eth_dev_get_hash_mac_addr_index(uint16_t port_id,
4397                 const struct rte_ether_addr *addr)
4398 {
4399         struct rte_eth_dev_info dev_info;
4400         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4401         unsigned i;
4402         int ret;
4403
4404         ret = rte_eth_dev_info_get(port_id, &dev_info);
4405         if (ret != 0)
4406                 return -1;
4407
4408         if (!dev->data->hash_mac_addrs)
4409                 return -1;
4410
4411         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
4412                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
4413                         RTE_ETHER_ADDR_LEN) == 0)
4414                         return i;
4415
4416         return -1;
4417 }
4418
4419 int
4420 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
4421                                 uint8_t on)
4422 {
4423         int index;
4424         int ret;
4425         struct rte_eth_dev *dev;
4426
4427         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4428         dev = &rte_eth_devices[port_id];
4429
4430         if (addr == NULL) {
4431                 RTE_ETHDEV_LOG(ERR,
4432                         "Cannot set ethdev port %u unicast hash table from NULL address\n",
4433                         port_id);
4434                 return -EINVAL;
4435         }
4436
4437         if (rte_is_zero_ether_addr(addr)) {
4438                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
4439                         port_id);
4440                 return -EINVAL;
4441         }
4442
4443         index = eth_dev_get_hash_mac_addr_index(port_id, addr);
4444         /* If the address is already in the table and "on" is set, do nothing */
4445         if ((index >= 0) && on)
4446                 return 0;
4447
4448         if (index < 0) {
4449                 if (!on) {
4450                         RTE_ETHDEV_LOG(ERR,
4451                                 "Port %u: the MAC address was not set in UTA\n",
4452                                 port_id);
4453                         return -EINVAL;
4454                 }
4455
4456                 index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr);
4457                 if (index < 0) {
4458                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
4459                                 port_id);
4460                         return -ENOSPC;
4461                 }
4462         }
4463
4464         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
4465         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
4466         if (ret == 0) {
4467                 /* Update address in NIC data structure */
4468                 if (on)
4469                         rte_ether_addr_copy(addr,
4470                                         &dev->data->hash_mac_addrs[index]);
4471                 else
4472                         rte_ether_addr_copy(&null_mac_addr,
4473                                         &dev->data->hash_mac_addrs[index]);
4474         }
4475
4476         return eth_err(port_id, ret);
4477 }
4478
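/*
 * Usage sketch (editor's illustration): add a MAC address to the unicast
 * hash table and remove it again. "example_" names are hypothetical.
 */
static __rte_unused int
example_toggle_uta_entry(uint16_t port_id, struct rte_ether_addr *mac)
{
        int ret;

        ret = rte_eth_dev_uc_hash_table_set(port_id, mac, 1);
        if (ret != 0)
                return ret;
        /* ... traffic to *mac is now accepted via the hash filter ... */
        return rte_eth_dev_uc_hash_table_set(port_id, mac, 0);
}
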
4479 int
4480 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
4481 {
4482         struct rte_eth_dev *dev;
4483
4484         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4485         dev = &rte_eth_devices[port_id];
4486
4487         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
4488         return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
4489                                                                        on));
4490 }
4491
4492 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
4493                                         uint16_t tx_rate)
4494 {
4495         struct rte_eth_dev *dev;
4496         struct rte_eth_dev_info dev_info;
4497         struct rte_eth_link link;
4498         int ret;
4499
4500         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4501         dev = &rte_eth_devices[port_id];
4502
4503         ret = rte_eth_dev_info_get(port_id, &dev_info);
4504         if (ret != 0)
4505                 return ret;
4506
4507         link = dev->data->dev_link;
4508
4509         if (queue_idx >= dev_info.max_tx_queues) {
4510                 RTE_ETHDEV_LOG(ERR,
4511                         "Set queue rate limit: port %u: invalid queue ID=%u\n",
4512                         port_id, queue_idx);
4513                 return -EINVAL;
4514         }
4515
4516         if (tx_rate > link.link_speed) {
4517                 RTE_ETHDEV_LOG(ERR,
4518                         "Set queue rate limit: invalid tx_rate=%u, greater than link speed %u\n",
4519                         tx_rate, link.link_speed);
4520                 return -EINVAL;
4521         }
4522
4523         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
4524         return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
4525                                                         queue_idx, tx_rate));
4526 }
4527
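/*
 * Usage sketch (editor's illustration): cap Tx queue 0 of a port at
 * 100 Mbit/s. tx_rate uses the same Mbps unit as link_speed, which the
 * check above relies on; the values here are hypothetical.
 */
static __rte_unused int
example_cap_tx_queue_rate(uint16_t port_id)
{
        return rte_eth_set_queue_rate_limit(port_id, 0, 100);
}
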
4528 int
4529 rte_eth_mirror_rule_set(uint16_t port_id,
4530                         struct rte_eth_mirror_conf *mirror_conf,
4531                         uint8_t rule_id, uint8_t on)
4532 {
4533         struct rte_eth_dev *dev;
4534
4535         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4536         dev = &rte_eth_devices[port_id];
4537
4538         if (mirror_conf == NULL) {
4539                 RTE_ETHDEV_LOG(ERR,
4540                         "Cannot set ethdev port %u mirror rule from NULL config\n",
4541                         port_id);
4542                 return -EINVAL;
4543         }
4544
4545         if (mirror_conf->rule_type == 0) {
4546                 RTE_ETHDEV_LOG(ERR, "Mirror rule type cannot be 0\n");
4547                 return -EINVAL;
4548         }
4549
4550         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
4551                 RTE_ETHDEV_LOG(ERR, "Invalid dst pool, pool id must be 0-%d\n",
4552                         ETH_64_POOLS - 1);
4553                 return -EINVAL;
4554         }
4555
4556         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
4557              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
4558             (mirror_conf->pool_mask == 0)) {
4559                 RTE_ETHDEV_LOG(ERR,
4560                         "Invalid mirror pool, pool mask cannot be 0\n");
4561                 return -EINVAL;
4562         }
4563
4564         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
4565             mirror_conf->vlan.vlan_mask == 0) {
4566                 RTE_ETHDEV_LOG(ERR,
4567                         "Invalid VLAN mask, VLAN mask cannot be 0\n");
4568                 return -EINVAL;
4569         }
4570
4571         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
4572
4573         return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
4574                                                 mirror_conf, rule_id, on));
4575 }
4576
4577 int
4578 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
4579 {
4580         struct rte_eth_dev *dev;
4581
4582         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4583         dev = &rte_eth_devices[port_id];
4584
4585         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
4586         return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev, rule_id));
4587 }
4588
4589 RTE_INIT(eth_dev_init_cb_lists)
4590 {
4591         uint16_t i;
4592
4593         for (i = 0; i < RTE_MAX_ETHPORTS; i++)
4594                 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
4595 }
4596
4597 int
4598 rte_eth_dev_callback_register(uint16_t port_id,
4599                         enum rte_eth_event_type event,
4600                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4601 {
4602         struct rte_eth_dev *dev;
4603         struct rte_eth_dev_callback *user_cb;
4604         uint16_t next_port;
4605         uint16_t last_port;
4606
4607         if (cb_fn == NULL) {
4608                 RTE_ETHDEV_LOG(ERR,
4609                         "Cannot register ethdev port %u callback from NULL\n",
4610                         port_id);
4611                 return -EINVAL;
4612         }
4613
4614         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4615                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
4616                 return -EINVAL;
4617         }
4618
4619         if (port_id == RTE_ETH_ALL) {
4620                 next_port = 0;
4621                 last_port = RTE_MAX_ETHPORTS - 1;
4622         } else {
4623                 next_port = last_port = port_id;
4624         }
4625
4626         rte_spinlock_lock(&eth_dev_cb_lock);
4627
4628         do {
4629                 dev = &rte_eth_devices[next_port];
4630
4631                 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
4632                         if (user_cb->cb_fn == cb_fn &&
4633                                 user_cb->cb_arg == cb_arg &&
4634                                 user_cb->event == event) {
4635                                 break;
4636                         }
4637                 }
4638
4639                 /* create a new callback. */
4640                 if (user_cb == NULL) {
4641                         user_cb = rte_zmalloc("INTR_USER_CALLBACK",
4642                                 sizeof(struct rte_eth_dev_callback), 0);
4643                         if (user_cb != NULL) {
4644                                 user_cb->cb_fn = cb_fn;
4645                                 user_cb->cb_arg = cb_arg;
4646                                 user_cb->event = event;
4647                                 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
4648                                                   user_cb, next);
4649                         } else {
4650                                 rte_spinlock_unlock(&eth_dev_cb_lock);
4651                                 rte_eth_dev_callback_unregister(port_id, event,
4652                                                                 cb_fn, cb_arg);
4653                                 return -ENOMEM;
4654                         }
4655
4656                 }
4657         } while (++next_port <= last_port);
4658
4659         rte_spinlock_unlock(&eth_dev_cb_lock);
4660         return 0;
4661 }
4662
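/*
 * Usage sketch (editor's illustration): register a link-status-change
 * handler on all ports. The handler below is hypothetical; it must match
 * the rte_eth_dev_cb_fn signature.
 */
static __rte_unused int
example_on_lsc(uint16_t port_id, enum rte_eth_event_type event,
                void *cb_arg, void *ret_param)
{
        RTE_SET_USED(cb_arg);
        RTE_SET_USED(ret_param);
        RTE_ETHDEV_LOG(INFO, "Port %u: link state event %d\n",
                port_id, (int)event);
        return 0;
}

static __rte_unused int
example_register_lsc_cb(void)
{
        return rte_eth_dev_callback_register(RTE_ETH_ALL,
                        RTE_ETH_EVENT_INTR_LSC, example_on_lsc, NULL);
}
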
4663 int
4664 rte_eth_dev_callback_unregister(uint16_t port_id,
4665                         enum rte_eth_event_type event,
4666                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4667 {
4668         int ret;
4669         struct rte_eth_dev *dev;
4670         struct rte_eth_dev_callback *cb, *next;
4671         uint16_t next_port;
4672         uint16_t last_port;
4673
4674         if (cb_fn == NULL) {
4675                 RTE_ETHDEV_LOG(ERR,
4676                         "Cannot unregister ethdev port %u callback from NULL\n",
4677                         port_id);
4678                 return -EINVAL;
4679         }
4680
4681         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4682                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
4683                 return -EINVAL;
4684         }
4685
4686         if (port_id == RTE_ETH_ALL) {
4687                 next_port = 0;
4688                 last_port = RTE_MAX_ETHPORTS - 1;
4689         } else {
4690                 next_port = last_port = port_id;
4691         }
4692
4693         rte_spinlock_lock(&eth_dev_cb_lock);
4694
4695         do {
4696                 dev = &rte_eth_devices[next_port];
4697                 ret = 0;
4698                 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
4699                      cb = next) {
4700
4701                         next = TAILQ_NEXT(cb, next);
4702
4703                         if (cb->cb_fn != cb_fn || cb->event != event ||
4704                             (cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
4705                                 continue;
4706
4707                         /*
4708                          * if this callback is not executing right now,
4709                          * then remove it.
4710                          */
4711                         if (cb->active == 0) {
4712                                 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
4713                                 rte_free(cb);
4714                         } else {
4715                                 ret = -EAGAIN;
4716                         }
4717                 }
4718         } while (++next_port <= last_port);
4719
4720         rte_spinlock_unlock(&eth_dev_cb_lock);
4721         return ret;
4722 }
4723
4724 int
4725 rte_eth_dev_callback_process(struct rte_eth_dev *dev,
4726         enum rte_eth_event_type event, void *ret_param)
4727 {
4728         struct rte_eth_dev_callback *cb_lst;
4729         struct rte_eth_dev_callback dev_cb;
4730         int rc = 0;
4731
4732         rte_spinlock_lock(&eth_dev_cb_lock);
4733         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
4734                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
4735                         continue;
4736                 dev_cb = *cb_lst;
4737                 cb_lst->active = 1;
4738                 if (ret_param != NULL)
4739                         dev_cb.ret_param = ret_param;
4740
4741                 rte_spinlock_unlock(&eth_dev_cb_lock);
4742                 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
4743                                 dev_cb.cb_arg, dev_cb.ret_param);
4744                 rte_spinlock_lock(&eth_dev_cb_lock);
4745                 cb_lst->active = 0;
4746         }
4747         rte_spinlock_unlock(&eth_dev_cb_lock);
4748         return rc;
4749 }
4750
4751 void
4752 rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
4753 {
4754         if (dev == NULL)
4755                 return;
4756
4757         rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);
4758
4759         dev->state = RTE_ETH_DEV_ATTACHED;
4760 }
4761
4762 int
4763 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
4764 {
4765         uint32_t vec;
4766         struct rte_eth_dev *dev;
4767         struct rte_intr_handle *intr_handle;
4768         uint16_t qid;
4769         int rc;
4770
4771         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4772         dev = &rte_eth_devices[port_id];
4773
4774         if (!dev->intr_handle) {
4775                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4776                 return -ENOTSUP;
4777         }
4778
4779         intr_handle = dev->intr_handle;
4780         if (!intr_handle->intr_vec) {
4781                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4782                 return -EPERM;
4783         }
4784
4785         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
4786                 vec = intr_handle->intr_vec[qid];
4787                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4788                 if (rc && rc != -EEXIST) {
4789                         RTE_ETHDEV_LOG(ERR,
4790                                 "p %u q %u rx ctl error op %d epfd %d vec %u\n",
4791                                 port_id, qid, op, epfd, vec);
4792                 }
4793         }
4794
4795         return 0;
4796 }
4797
4798 int
4799 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
4800 {
4801         struct rte_intr_handle *intr_handle;
4802         struct rte_eth_dev *dev;
4803         unsigned int efd_idx;
4804         uint32_t vec;
4805         int fd;
4806
4807         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
4808         dev = &rte_eth_devices[port_id];
4809
4810         if (queue_id >= dev->data->nb_rx_queues) {
4811                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4812                 return -1;
4813         }
4814
4815         if (!dev->intr_handle) {
4816                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4817                 return -1;
4818         }
4819
4820         intr_handle = dev->intr_handle;
4821         if (!intr_handle->intr_vec) {
4822                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4823                 return -1;
4824         }
4825
4826         vec = intr_handle->intr_vec[queue_id];
4827         efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
4828                 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
4829         fd = intr_handle->efds[efd_idx];
4830
4831         return fd;
4832 }
4833
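/*
 * Usage sketch (editor's illustration): fetch the event fd backing Rx
 * queue 0 so the application can add it to its own epoll set; the
 * function above returns -1 on any error.
 */
static __rte_unused int
example_get_rxq_event_fd(uint16_t port_id)
{
        int fd = rte_eth_dev_rx_intr_ctl_q_get_fd(port_id, 0);

        /* add "fd" to an application epoll/poll loop when fd >= 0 */
        return fd;
}
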
4834 static inline int
4835 eth_dev_dma_mzone_name(char *name, size_t len, uint16_t port_id, uint16_t queue_id,
4836                 const char *ring_name)
4837 {
4838         return snprintf(name, len, "eth_p%d_q%d_%s",
4839                         port_id, queue_id, ring_name);
4840 }
4841
4842 const struct rte_memzone *
4843 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
4844                          uint16_t queue_id, size_t size, unsigned align,
4845                          int socket_id)
4846 {
4847         char z_name[RTE_MEMZONE_NAMESIZE];
4848         const struct rte_memzone *mz;
4849         int rc;
4850
4851         rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
4852                         queue_id, ring_name);
4853         if (rc >= RTE_MEMZONE_NAMESIZE) {
4854                 RTE_ETHDEV_LOG(ERR, "ring name too long\n");
4855                 rte_errno = ENAMETOOLONG;
4856                 return NULL;
4857         }
4858
4859         mz = rte_memzone_lookup(z_name);
4860         if (mz) {
4861                 if ((socket_id != SOCKET_ID_ANY && socket_id != mz->socket_id) ||
4862                                 size > mz->len ||
4863                                 ((uintptr_t)mz->addr & (align - 1)) != 0) {
4864                         RTE_ETHDEV_LOG(ERR,
4865                                 "memzone %s does not satisfy the requested attributes\n",
4866                                 mz->name);
4867                         return NULL;
4868                 }
4869
4870                 return mz;
4871         }
4872
4873         return rte_memzone_reserve_aligned(z_name, size, socket_id,
4874                         RTE_MEMZONE_IOVA_CONTIG, align);
4875 }
4876
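/*
 * Usage sketch (editor's illustration): how a PMD might reserve the
 * descriptor ring memory for one Rx queue during queue setup. The ring
 * name, descriptor count and descriptor size are hypothetical.
 */
static __rte_unused const struct rte_memzone *
example_reserve_rx_ring(struct rte_eth_dev *dev, uint16_t queue_id)
{
        /* 1024 descriptors of 16 bytes each, cache-line aligned */
        return rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id,
                        1024 * 16, RTE_CACHE_LINE_SIZE,
                        dev->data->numa_node);
}
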
4877 int
4878 rte_eth_dma_zone_free(const struct rte_eth_dev *dev, const char *ring_name,
4879                 uint16_t queue_id)
4880 {
4881         char z_name[RTE_MEMZONE_NAMESIZE];
4882         const struct rte_memzone *mz;
4883         int rc = 0;
4884
4885         rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
4886                         queue_id, ring_name);
4887         if (rc >= RTE_MEMZONE_NAMESIZE) {
4888                 RTE_ETHDEV_LOG(ERR, "ring name too long\n");
4889                 return -ENAMETOOLONG;
4890         }
4891
4892         mz = rte_memzone_lookup(z_name);
4893         if (mz)
4894                 rc = rte_memzone_free(mz);
4895         else
4896                 rc = -ENOENT;
4897
4898         return rc;
4899 }
4900
4901 int
4902 rte_eth_dev_create(struct rte_device *device, const char *name,
4903         size_t priv_data_size,
4904         ethdev_bus_specific_init ethdev_bus_specific_init,
4905         void *bus_init_params,
4906         ethdev_init_t ethdev_init, void *init_params)
4907 {
4908         struct rte_eth_dev *ethdev;
4909         int retval;
4910
4911         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL);
4912
4913         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
4914                 ethdev = rte_eth_dev_allocate(name);
4915                 if (!ethdev)
4916                         return -ENODEV;
4917
4918                 if (priv_data_size) {
4919                         ethdev->data->dev_private = rte_zmalloc_socket(
4920                                 name, priv_data_size, RTE_CACHE_LINE_SIZE,
4921                                 device->numa_node);
4922
4923                         if (!ethdev->data->dev_private) {
4924                                 RTE_ETHDEV_LOG(ERR,
4925                                         "failed to allocate private data\n");
4926                                 retval = -ENOMEM;
4927                                 goto probe_failed;
4928                         }
4929                 }
4930         } else {
4931                 ethdev = rte_eth_dev_attach_secondary(name);
4932                 if (!ethdev) {
4933                         RTE_ETHDEV_LOG(ERR,
4934                                 "secondary process attach failed, ethdev doesn't exist\n");
4935                         return  -ENODEV;
4936                         return -ENODEV;
4937         }
4938
4939         ethdev->device = device;
4940
4941         if (ethdev_bus_specific_init) {
4942                 retval = ethdev_bus_specific_init(ethdev, bus_init_params);
4943                 if (retval) {
4944                         RTE_ETHDEV_LOG(ERR,
4945                                 "ethdev bus specific initialisation failed\n");
4946                         goto probe_failed;
4947                 }
4948         }
4949
4950         retval = ethdev_init(ethdev, init_params);
4951         if (retval) {
4952                 RTE_ETHDEV_LOG(ERR, "ethdev initialisation failed\n");
4953                 goto probe_failed;
4954         }
4955
4956         rte_eth_dev_probing_finish(ethdev);
4957
4958         return retval;
4959
4960 probe_failed:
4961         rte_eth_dev_release_port(ethdev);
4962         return retval;
4963 }
4964
4965 int
4966 rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
4967         ethdev_uninit_t ethdev_uninit)
4968 {
4969         int ret;
4970
4971         ethdev = rte_eth_dev_allocated(ethdev->data->name);
4972         if (!ethdev)
4973                 return -ENODEV;
4974
4975         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL);
4976
4977         ret = ethdev_uninit(ethdev);
4978         if (ret)
4979                 return ret;
4980
4981         return rte_eth_dev_release_port(ethdev);
4982 }
4983
4984 int
4985 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
4986                           int epfd, int op, void *data)
4987 {
4988         uint32_t vec;
4989         struct rte_eth_dev *dev;
4990         struct rte_intr_handle *intr_handle;
4991         int rc;
4992
4993         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4994         dev = &rte_eth_devices[port_id];
4995
4996         if (queue_id >= dev->data->nb_rx_queues) {
4997                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4998                 return -EINVAL;
4999         }
5000
5001         if (!dev->intr_handle) {
5002                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
5003                 return -ENOTSUP;
5004         }
5005
5006         intr_handle = dev->intr_handle;
5007         if (!intr_handle->intr_vec) {
5008                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
5009                 return -EPERM;
5010         }
5011
5012         vec = intr_handle->intr_vec[queue_id];
5013         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
5014         if (rc && rc != -EEXIST) {
5015                 RTE_ETHDEV_LOG(ERR,
5016                         "p %u q %u rx ctl error op %d epfd %d vec %u\n",
5017                         port_id, queue_id, op, epfd, vec);
5018                 return rc;
5019         }
5020
5021         return 0;
5022 }
5023
5024 int
5025 rte_eth_dev_rx_intr_enable(uint16_t port_id,
5026                            uint16_t queue_id)
5027 {
5028         struct rte_eth_dev *dev;
5029         int ret;
5030
5031         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5032         dev = &rte_eth_devices[port_id];
5033
5034         ret = eth_dev_validate_rx_queue(dev, queue_id);
5035         if (ret != 0)
5036                 return ret;
5037
5038         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
5039         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id));
5040 }
5041
5042 int
5043 rte_eth_dev_rx_intr_disable(uint16_t port_id,
5044                             uint16_t queue_id)
5045 {
5046         struct rte_eth_dev *dev;
5047         int ret;
5048
5049         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5050         dev = &rte_eth_devices[port_id];
5051
5052         ret = eth_dev_validate_rx_queue(dev, queue_id);
5053         if (ret != 0)
5054                 return ret;
5055
5056         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
5057         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id));
5058 }
5059
5060
5061 const struct rte_eth_rxtx_callback *
5062 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
5063                 rte_rx_callback_fn fn, void *user_param)
5064 {
5065 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5066         rte_errno = ENOTSUP;
5067         return NULL;
5068 #endif
5069         struct rte_eth_dev *dev;
5070
5071         /* check input parameters */
5072         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
5073                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
5074                 rte_errno = EINVAL;
5075                 return NULL;
5076         }
5077         dev = &rte_eth_devices[port_id];
5078         if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
5079                 rte_errno = EINVAL;
5080                 return NULL;
5081         }
5082         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
5083
5084         if (cb == NULL) {
5085                 rte_errno = ENOMEM;
5086                 return NULL;
5087         }
5088
5089         cb->fn.rx = fn;
5090         cb->param = user_param;
5091
5092         rte_spinlock_lock(&eth_dev_rx_cb_lock);
5093         /* Add the callback in FIFO order. */
5094         struct rte_eth_rxtx_callback *tail =
5095                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
5096
5097         if (!tail) {
5098                 /* Stores to cb->fn and cb->param should complete before
5099                  * cb is visible to data plane.
5100                  */
5101                 __atomic_store_n(
5102                         &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
5103                         cb, __ATOMIC_RELEASE);
5104
5105         } else {
5106                 while (tail->next)
5107                         tail = tail->next;
5108                 /* Stores to cb->fn and cb->param should complete before
5109                  * cb is visible to data plane.
5110                  */
5111                 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
5112         }
5113         rte_spinlock_unlock(&eth_dev_rx_cb_lock);
5114
5115         return cb;
5116 }
5117
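/*
 * Usage sketch (editor's illustration): a post-Rx callback that counts
 * received packets. The counter is hypothetical, and the build must have
 * RTE_ETHDEV_RXTX_CALLBACKS enabled. Register it with e.g.
 * rte_eth_add_rx_callback(port_id, 0, example_count_rx, &counter).
 */
static __rte_unused uint16_t
example_count_rx(uint16_t port_id, uint16_t queue_id,
                struct rte_mbuf *pkts[], uint16_t nb_pkts,
                uint16_t max_pkts, void *user_param)
{
        uint64_t *counter = user_param;

        RTE_SET_USED(port_id);
        RTE_SET_USED(queue_id);
        RTE_SET_USED(pkts);
        RTE_SET_USED(max_pkts);
        *counter += nb_pkts;
        return nb_pkts;
}
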
5118 const struct rte_eth_rxtx_callback *
5119 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
5120                 rte_rx_callback_fn fn, void *user_param)
5121 {
5122 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5123         rte_errno = ENOTSUP;
5124         return NULL;
5125 #endif
5126         /* check input parameters */
5127         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
5128                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
5129                 rte_errno = EINVAL;
5130                 return NULL;
5131         }
5132
5133         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
5134
5135         if (cb == NULL) {
5136                 rte_errno = ENOMEM;
5137                 return NULL;
5138         }
5139
5140         cb->fn.rx = fn;
5141         cb->param = user_param;
5142
5143         rte_spinlock_lock(&eth_dev_rx_cb_lock);
5144         /* Add the callback at the first position. */
5145         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
5146         /* Stores to cb->fn, cb->param and cb->next should complete before
5147          * cb is visible to data plane threads.
5148          */
5149         __atomic_store_n(
5150                 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
5151                 cb, __ATOMIC_RELEASE);
5152         rte_spinlock_unlock(&eth_dev_rx_cb_lock);
5153
5154         return cb;
5155 }
5156
5157 const struct rte_eth_rxtx_callback *
5158 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
5159                 rte_tx_callback_fn fn, void *user_param)
5160 {
5161 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5162         rte_errno = ENOTSUP;
5163         return NULL;
5164 #endif
5165         struct rte_eth_dev *dev;
5166
5167         /* check input parameters */
5168         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
5169                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
5170                 rte_errno = EINVAL;
5171                 return NULL;
5172         }
5173
5174         dev = &rte_eth_devices[port_id];
5175         if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
5176                 rte_errno = EINVAL;
5177                 return NULL;
5178         }
5179
5180         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
5181
5182         if (cb == NULL) {
5183                 rte_errno = ENOMEM;
5184                 return NULL;
5185         }
5186
5187         cb->fn.tx = fn;
5188         cb->param = user_param;
5189
5190         rte_spinlock_lock(&eth_dev_tx_cb_lock);
5191         /* Add the callback in FIFO order. */
5192         struct rte_eth_rxtx_callback *tail =
5193                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
5194
5195         if (!tail) {
5196                 /* Stores to cb->fn and cb->param should complete before
5197                  * cb is visible to data plane.
5198                  */
5199                 __atomic_store_n(
5200                         &rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id],
5201                         cb, __ATOMIC_RELEASE);
5202
5203         } else {
5204                 while (tail->next)
5205                         tail = tail->next;
5206                 /* Stores to cb->fn and cb->param should complete before
5207                  * cb is visible to data plane.
5208                  */
5209                 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
5210         }
5211         rte_spinlock_unlock(&eth_dev_tx_cb_lock);
5212
5213         return cb;
5214 }
5215
5216 int
5217 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
5218                 const struct rte_eth_rxtx_callback *user_cb)
5219 {
5220 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5221         return -ENOTSUP;
5222 #endif
5223         /* Check input parameters. */
5224         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5225         if (user_cb == NULL ||
5226                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
5227                 return -EINVAL;
5228
5229         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
5230         struct rte_eth_rxtx_callback *cb;
5231         struct rte_eth_rxtx_callback **prev_cb;
5232         int ret = -EINVAL;
5233
5234         rte_spinlock_lock(&eth_dev_rx_cb_lock);
5235         prev_cb = &dev->post_rx_burst_cbs[queue_id];
5236         for (; *prev_cb != NULL; prev_cb = &cb->next) {
5237                 cb = *prev_cb;
5238                 if (cb == user_cb) {
5239                         /* Remove the user cb from the callback list. */
5240                         __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
5241                         ret = 0;
5242                         break;
5243                 }
5244         }
5245         rte_spinlock_unlock(&eth_dev_rx_cb_lock);
5246
5247         return ret;
5248 }
5249
5250 int
5251 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
5252                 const struct rte_eth_rxtx_callback *user_cb)
5253 {
5254 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5255         return -ENOTSUP;
5256 #endif
5257         /* Check input parameters. */
5258         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5259         if (user_cb == NULL ||
5260                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
5261                 return -EINVAL;
5262
5263         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
5264         int ret = -EINVAL;
5265         struct rte_eth_rxtx_callback *cb;
5266         struct rte_eth_rxtx_callback **prev_cb;
5267
5268         rte_spinlock_lock(&eth_dev_tx_cb_lock);
5269         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
5270         for (; *prev_cb != NULL; prev_cb = &cb->next) {
5271                 cb = *prev_cb;
5272                 if (cb == user_cb) {
5273                         /* Remove the user cb from the callback list. */
5274                         __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
5275                         ret = 0;
5276                         break;
5277                 }
5278         }
5279         rte_spinlock_unlock(&eth_dev_tx_cb_lock);
5280
5281         return ret;
5282 }
5283
5284 int
5285 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5286         struct rte_eth_rxq_info *qinfo)
5287 {
5288         struct rte_eth_dev *dev;
5289
5290         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5291         dev = &rte_eth_devices[port_id];
5292
5293         if (queue_id >= dev->data->nb_rx_queues) {
5294                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
5295                 return -EINVAL;
5296         }
5297
5298         if (qinfo == NULL) {
5299                 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Rx queue %u info to NULL\n",
5300                         port_id, queue_id);
5301                 return -EINVAL;
5302         }
5303
5304         if (dev->data->rx_queues == NULL ||
5305                         dev->data->rx_queues[queue_id] == NULL) {
5306                 RTE_ETHDEV_LOG(ERR,
5307                                "Rx queue %"PRIu16" of device with port_id=%"
5308                                PRIu16" has not been setup\n",
5309                                queue_id, port_id);
5310                 return -EINVAL;
5311         }
5312
5313         if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
5314                 RTE_ETHDEV_LOG(INFO,
5315                         "Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
5316                         queue_id, port_id);
5317                 return -EINVAL;
5318         }
5319
5320         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
5321
5322         memset(qinfo, 0, sizeof(*qinfo));
5323         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
5324         qinfo->queue_state = dev->data->rx_queue_state[queue_id];
5325
5326         return 0;
5327 }
5328
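/*
 * Usage sketch (editor's illustration): report the ring size of a
 * configured Rx queue using the query above.
 */
static __rte_unused void
example_log_rxq_size(uint16_t port_id, uint16_t queue_id)
{
        struct rte_eth_rxq_info qinfo;

        if (rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo) == 0)
                RTE_ETHDEV_LOG(INFO,
                        "Port %u Rx queue %u: %u descriptors\n",
                        port_id, queue_id, qinfo.nb_desc);
}
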
5329 int
5330 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5331         struct rte_eth_txq_info *qinfo)
5332 {
5333         struct rte_eth_dev *dev;
5334
5335         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5336         dev = &rte_eth_devices[port_id];
5337
5338         if (queue_id >= dev->data->nb_tx_queues) {
5339                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
5340                 return -EINVAL;
5341         }
5342
5343         if (qinfo == NULL) {
5344                 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Tx queue %u info to NULL\n",
5345                         port_id, queue_id);
5346                 return -EINVAL;
5347         }
5348
5349         if (dev->data->tx_queues == NULL ||
5350                         dev->data->tx_queues[queue_id] == NULL) {
5351                 RTE_ETHDEV_LOG(ERR,
5352                                "Tx queue %"PRIu16" of device with port_id=%"
5353                                PRIu16" has not been setup\n",
5354                                queue_id, port_id);
5355                 return -EINVAL;
5356         }
5357
5358         if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
5359                 RTE_ETHDEV_LOG(INFO,
5360                         "Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
5361                         queue_id, port_id);
5362                 return -EINVAL;
5363         }
5364
5365         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
5366
5367         memset(qinfo, 0, sizeof(*qinfo));
5368         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
5369         qinfo->queue_state = dev->data->tx_queue_state[queue_id];
5370
5371         return 0;
5372 }
5373
5374 int
5375 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5376                           struct rte_eth_burst_mode *mode)
5377 {
5378         struct rte_eth_dev *dev;
5379
5380         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5381         dev = &rte_eth_devices[port_id];
5382
5383         if (queue_id >= dev->data->nb_rx_queues) {
5384                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
5385                 return -EINVAL;
5386         }
5387
5388         if (mode == NULL) {
5389                 RTE_ETHDEV_LOG(ERR,
5390                         "Cannot get ethdev port %u Rx queue %u burst mode to NULL\n",
5391                         port_id, queue_id);
5392                 return -EINVAL;
5393         }
5394
5395         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP);
5396         memset(mode, 0, sizeof(*mode));
5397         return eth_err(port_id,
5398                        dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode));
5399 }
5400
5401 int
5402 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5403                           struct rte_eth_burst_mode *mode)
5404 {
5405         struct rte_eth_dev *dev;
5406
5407         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5408         dev = &rte_eth_devices[port_id];
5409
5410         if (queue_id >= dev->data->nb_tx_queues) {
5411                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
5412                 return -EINVAL;
5413         }
5414
5415         if (mode == NULL) {
5416                 RTE_ETHDEV_LOG(ERR,
5417                         "Cannot get ethdev port %u Tx queue %u burst mode to NULL\n",
5418                         port_id, queue_id);
5419                 return -EINVAL;
5420         }
5421
5422         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP);
5423         memset(mode, 0, sizeof(*mode));
5424         return eth_err(port_id,
5425                        dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode));
5426 }
5427
5428 int
5429 rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id,
5430                 struct rte_power_monitor_cond *pmc)
5431 {
5432         struct rte_eth_dev *dev;
5433
5434         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5435         dev = &rte_eth_devices[port_id];
5436
5437         if (queue_id >= dev->data->nb_rx_queues) {
5438                 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
5439                 return -EINVAL;
5440         }
5441
5442         if (pmc == NULL) {
5443                 RTE_ETHDEV_LOG(ERR,
5444                         "Cannot get ethdev port %u Rx queue %u power monitor condition to NULL\n",
5445                         port_id, queue_id);
5446                 return -EINVAL;
5447         }
5448
5449         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_monitor_addr, -ENOTSUP);
5450         return eth_err(port_id,
5451                 dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id], pmc));
5452 }
5453
5454 int
5455 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
5456                              struct rte_ether_addr *mc_addr_set,
5457                              uint32_t nb_mc_addr)
5458 {
5459         struct rte_eth_dev *dev;
5460
5461         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5462         dev = &rte_eth_devices[port_id];
5463
5464         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
5465         return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
5466                                                 mc_addr_set, nb_mc_addr));
5467 }
5468
5469 int
5470 rte_eth_timesync_enable(uint16_t port_id)
5471 {
5472         struct rte_eth_dev *dev;
5473
5474         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5475         dev = &rte_eth_devices[port_id];
5476
5477         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
5478         return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
5479 }
5480
5481 int
5482 rte_eth_timesync_disable(uint16_t port_id)
5483 {
5484         struct rte_eth_dev *dev;
5485
5486         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5487         dev = &rte_eth_devices[port_id];
5488
5489         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
5490         return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
5491 }
5492
5493 int
5494 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
5495                                    uint32_t flags)
5496 {
5497         struct rte_eth_dev *dev;
5498
5499         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5500         dev = &rte_eth_devices[port_id];
5501
5502         if (timestamp == NULL) {
5503                 RTE_ETHDEV_LOG(ERR,
5504                         "Cannot read ethdev port %u Rx timestamp to NULL\n",
5505                         port_id);
5506                 return -EINVAL;
5507         }
5508
5509         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
5510         return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
5511                                 (dev, timestamp, flags));
5512 }
5513
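/*
 * Usage sketch (editor's illustration): enable timesync and read the Rx
 * timestamp of the last matched PTP packet. The flags argument is
 * driver-specific; 0 is a hypothetical placeholder here.
 */
static __rte_unused int
example_read_ptp_rx_timestamp(uint16_t port_id, struct timespec *ts)
{
        int ret = rte_eth_timesync_enable(port_id);

        if (ret != 0)
                return ret;
        return rte_eth_timesync_read_rx_timestamp(port_id, ts, 0);
}
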
5514 int
5515 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
5516                                    struct timespec *timestamp)
5517 {
5518         struct rte_eth_dev *dev;
5519
5520         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5521         dev = &rte_eth_devices[port_id];
5522
5523         if (timestamp == NULL) {
5524                 RTE_ETHDEV_LOG(ERR,
5525                         "Cannot read ethdev port %u Tx timestamp to NULL\n",
5526                         port_id);
5527                 return -EINVAL;
5528         }
5529
5530         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
5531         return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
5532                                 (dev, timestamp));
5533 }
5534
5535 int
5536 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
5537 {
5538         struct rte_eth_dev *dev;
5539
5540         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5541         dev = &rte_eth_devices[port_id];
5542
5543         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
5544         return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, delta));
5545 }
5546
5547 int
5548 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
5549 {
5550         struct rte_eth_dev *dev;
5551
5552         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5553         dev = &rte_eth_devices[port_id];
5554
5555         if (timestamp == NULL) {
5556                 RTE_ETHDEV_LOG(ERR,
5557                         "Cannot read ethdev port %u timesync time to NULL\n",
5558                         port_id);
5559                 return -EINVAL;
5560         }
5561
5562         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
5563         return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
5564                                                                 timestamp));
5565 }
5566
5567 int
5568 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
5569 {
5570         struct rte_eth_dev *dev;
5571
5572         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5573         dev = &rte_eth_devices[port_id];
5574
5575         if (timestamp == NULL) {
5576                 RTE_ETHDEV_LOG(ERR,
5577                         "Cannot write ethdev port %u timesync from NULL time\n",
5578                         port_id);
5579                 return -EINVAL;
5580         }
5581
5582         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
5583         return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
5584                                                                 timestamp));
5585 }
5586
5587 int
5588 rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
5589 {
5590         struct rte_eth_dev *dev;
5591
5592         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5593         dev = &rte_eth_devices[port_id];
5594
5595         if (clock == NULL) {
5596                 RTE_ETHDEV_LOG(ERR, "Cannot read ethdev port %u clock to NULL\n",
5597                         port_id);
5598                 return -EINVAL;
5599         }
5600
5601         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP);
5602         return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock));
5603 }
5604
5605 int
5606 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
5607 {
5608         struct rte_eth_dev *dev;
5609
5610         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5611         dev = &rte_eth_devices[port_id];
5612
5613         if (info == NULL) {
5614                 RTE_ETHDEV_LOG(ERR,
5615                         "Cannot get ethdev port %u register info to NULL\n",
5616                         port_id);
5617                 return -EINVAL;
5618         }
5619
5620         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
5621         return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
5622 }
5623
5624 int
5625 rte_eth_dev_get_eeprom_length(uint16_t port_id)
5626 {
5627         struct rte_eth_dev *dev;
5628
5629         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5630         dev = &rte_eth_devices[port_id];
5631
5632         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
5633         return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
5634 }
5635
5636 int
5637 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
5638 {
5639         struct rte_eth_dev *dev;
5640
5641         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5642         dev = &rte_eth_devices[port_id];
5643
5644         if (info == NULL) {
5645                 RTE_ETHDEV_LOG(ERR,
5646                         "Cannot get ethdev port %u EEPROM info to NULL\n",
5647                         port_id);
5648                 return -EINVAL;
5649         }
5650
5651         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
5652         return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
5653 }
5654
5655 int
5656 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
5657 {
5658         struct rte_eth_dev *dev;
5659
5660         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5661         dev = &rte_eth_devices[port_id];
5662
5663         if (info == NULL) {
5664                 RTE_ETHDEV_LOG(ERR,
5665                         "Cannot set ethdev port %u EEPROM from NULL info\n",
5666                         port_id);
5667                 return -EINVAL;
5668         }
5669
5670         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
5671         return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
5672 }
5673
5674 int
5675 rte_eth_dev_get_module_info(uint16_t port_id,
5676                             struct rte_eth_dev_module_info *modinfo)
5677 {
5678         struct rte_eth_dev *dev;
5679
5680         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5681         dev = &rte_eth_devices[port_id];
5682
5683         if (modinfo == NULL) {
5684                 RTE_ETHDEV_LOG(ERR,
5685                         "Cannot get ethdev port %u EEPROM module info to NULL\n",
5686                         port_id);
5687                 return -EINVAL;
5688         }
5689
5690         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP);
5691         return (*dev->dev_ops->get_module_info)(dev, modinfo);
5692 }
5693
5694 int
5695 rte_eth_dev_get_module_eeprom(uint16_t port_id,
5696                               struct rte_dev_eeprom_info *info)
5697 {
5698         struct rte_eth_dev *dev;
5699
5700         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5701         dev = &rte_eth_devices[port_id];
5702
5703         if (info == NULL) {
5704                 RTE_ETHDEV_LOG(ERR,
5705                         "Cannot get ethdev port %u module EEPROM info to NULL\n",
5706                         port_id);
5707                 return -EINVAL;
5708         }
5709
5710         if (info->data == NULL) {
5711                 RTE_ETHDEV_LOG(ERR,
5712                         "Cannot get ethdev port %u module EEPROM data to NULL\n",
5713                         port_id);
5714                 return -EINVAL;
5715         }
5716
5717         if (info->length == 0) {
5718                 RTE_ETHDEV_LOG(ERR,
5719                         "Cannot get ethdev port %u module EEPROM to data with zero size\n",
5720                         port_id);
5721                 return -EINVAL;
5722         }
5723
5724         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP);
5725         return (*dev->dev_ops->get_module_eeprom)(dev, info);
5726 }
5727
5728 int
5729 rte_eth_dev_get_dcb_info(uint16_t port_id,
5730                              struct rte_eth_dcb_info *dcb_info)
5731 {
5732         struct rte_eth_dev *dev;
5733
5734         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5735         dev = &rte_eth_devices[port_id];
5736
5737         if (dcb_info == NULL) {
5738                 RTE_ETHDEV_LOG(ERR,
5739                         "Cannot get ethdev port %u DCB info to NULL\n",
5740                         port_id);
5741                 return -EINVAL;
5742         }
5743
5744         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
5745
5746         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
5747         return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
5748 }
5749
5750 static void
5751 eth_dev_adjust_nb_desc(uint16_t *nb_desc,
5752                 const struct rte_eth_desc_lim *desc_lim)
5753 {
5754         if (desc_lim->nb_align != 0)
5755                 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
5756
5757         if (desc_lim->nb_max != 0)
5758                 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
5759
5760         *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
5761 }
5762
5763 int
5764 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
5765                                  uint16_t *nb_rx_desc,
5766                                  uint16_t *nb_tx_desc)
5767 {
5768         struct rte_eth_dev_info dev_info;
5769         int ret;
5770
5771         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5772
5773         ret = rte_eth_dev_info_get(port_id, &dev_info);
5774         if (ret != 0)
5775                 return ret;
5776
5777         if (nb_rx_desc != NULL)
5778                 eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
5779
5780         if (nb_tx_desc != NULL)
5781                 eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
5782
5783         return 0;
5784 }
5785
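/*
 * Usage sketch (editor's illustration): clamp an application's requested
 * ring sizes to the device limits before queue setup; 4096 is a
 * hypothetical request.
 */
static __rte_unused int
example_pick_ring_sizes(uint16_t port_id,
                uint16_t *nb_rxd, uint16_t *nb_txd)
{
        *nb_rxd = 4096;
        *nb_txd = 4096;
        return rte_eth_dev_adjust_nb_rx_tx_desc(port_id, nb_rxd, nb_txd);
}
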
5786 int
5787 rte_eth_dev_hairpin_capability_get(uint16_t port_id,
5788                                    struct rte_eth_hairpin_cap *cap)
5789 {
5790         struct rte_eth_dev *dev;
5791
5792         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5793         dev = &rte_eth_devices[port_id];
5794
5795         if (cap == NULL) {
5796                 RTE_ETHDEV_LOG(ERR,
5797                         "Cannot get ethdev port %u hairpin capability to NULL\n",
5798                         port_id);
5799                 return -EINVAL;
5800         }
5801
5802         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP);
5803         memset(cap, 0, sizeof(*cap));
5804         return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap));
5805 }
5806
5807 int
5808 rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
5809 {
5810         if (dev->data->rx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN)
5811                 return 1;
5812         return 0;
5813 }
5814
5815 int
5816 rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
5817 {
5818         if (dev->data->tx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN)
5819                 return 1;
5820         return 0;
5821 }
5822
5823 int
5824 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
5825 {
5826         struct rte_eth_dev *dev;
5827
5828         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5829         dev = &rte_eth_devices[port_id];
5830
5831         if (pool == NULL) {
5832                 RTE_ETHDEV_LOG(ERR,
5833                         "Cannot test ethdev port %u mempool operation from NULL pool\n",
5834                         port_id);
5835                 return -EINVAL;
5836         }
5837
5838         if (*dev->dev_ops->pool_ops_supported == NULL)
5839                 return 1; /* all pools are supported */
5840
5841         return (*dev->dev_ops->pool_ops_supported)(dev, pool);
5842 }
5843
5844 /**
5845  * A set of values to describe the possible states of a switch domain.
5846  */
5847 enum rte_eth_switch_domain_state {
5848         RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
5849         RTE_ETH_SWITCH_DOMAIN_ALLOCATED
5850 };
5851
5852 /**
5853  * Array of switch domains available for allocation. Array is sized to
5854  * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
5855  * ethdev ports in a single process.
5856  */
5857 static struct rte_eth_dev_switch {
5858         enum rte_eth_switch_domain_state state;
5859 } eth_dev_switch_domains[RTE_MAX_ETHPORTS];
5860
5861 int
5862 rte_eth_switch_domain_alloc(uint16_t *domain_id)
5863 {
5864         uint16_t i;
5865
5866         *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
5867
5868         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
5869                 if (eth_dev_switch_domains[i].state ==
5870                         RTE_ETH_SWITCH_DOMAIN_UNUSED) {
5871                         eth_dev_switch_domains[i].state =
5872                                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
5873                         *domain_id = i;
5874                         return 0;
5875                 }
5876         }
5877
5878         return -ENOSPC;
5879 }
5880
5881 int
5882 rte_eth_switch_domain_free(uint16_t domain_id)
5883 {
5884         if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
5885                 domain_id >= RTE_MAX_ETHPORTS)
5886                 return -EINVAL;
5887
5888         if (eth_dev_switch_domains[domain_id].state !=
5889                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
5890                 return -EINVAL;
5891
5892         eth_dev_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;
5893
5894         return 0;
5895 }
5896
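/*
 * Usage sketch (editor's illustration): allocate a switch domain for a
 * group of related ports and release it on teardown.
 */
static __rte_unused int
example_with_switch_domain(void)
{
        uint16_t domain_id;
        int ret = rte_eth_switch_domain_alloc(&domain_id);

        if (ret != 0)
                return ret;
        /* ... assign domain_id to the ports sharing the switch ... */
        return rte_eth_switch_domain_free(domain_id);
}
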
5897 static int
5898 eth_dev_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
5899 {
5900         int state;
5901         struct rte_kvargs_pair *pair;
5902         char *letter;
5903
5904         arglist->str = strdup(str_in);
5905         if (arglist->str == NULL)
5906                 return -ENOMEM;
5907
5908         letter = arglist->str;
5909         state = 0;
5910         arglist->count = 0;
5911         pair = &arglist->pairs[0];
5912         while (1) {
5913                 switch (state) {
5914                 case 0: /* Initial */
5915                         if (*letter == '=')
5916                                 return -EINVAL;
5917                         else if (*letter == '\0')
5918                                 return 0;
5919
5920                         state = 1;
5921                         pair->key = letter;
5922                         /* fall-thru */
5923
5924                 case 1: /* Parsing key */
5925                         if (*letter == '=') {
5926                                 *letter = '\0';
5927                                 pair->value = letter + 1;
5928                                 state = 2;
5929                         } else if (*letter == ',' || *letter == '\0')
5930                                 return -EINVAL;
5931                         break;
5932
5933
5934                 case 2: /* Parsing value */
5935                         if (*letter == '[')
5936                                 state = 3;
5937                         else if (*letter == ',') {
5938                                 *letter = '\0';
5939                                 arglist->count++;
5940                                 pair = &arglist->pairs[arglist->count];
5941                                 state = 0;
5942                         } else if (*letter == '\0') {
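                                /*
                                 * Step back so the terminating '\0' is
                                 * seen again in state 0, which ends
                                 * the scan after this last pair has
                                 * been counted.
                                 */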
5943                                 letter--;
5944                                 arglist->count++;
5945                                 pair = &arglist->pairs[arglist->count];
5946                                 state = 0;
5947                         }
5948                         break;
5949
5950                 case 3: /* Parsing list */
5951                         if (*letter == ']')
5952                                 state = 2;
5953                         else if (*letter == '\0')
5954                                 return -EINVAL;
5955                         break;
5956                 }
5957                 letter++;
5958         }
5959 }
5960
5961 int
5962 rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
5963 {
5964         struct rte_kvargs args;
5965         struct rte_kvargs_pair *pair;
5966         unsigned int i;
5967         int result = 0;
5968
5969         memset(eth_da, 0, sizeof(*eth_da));
5970
5971         result = eth_dev_devargs_tokenise(&args, dargs);
5972         if (result < 0)
5973                 goto parse_cleanup;
5974
5975         for (i = 0; i < args.count; i++) {
5976                 pair = &args.pairs[i];
5977                 if (strcmp("representor", pair->key) == 0) {
5978                         if (eth_da->type != RTE_ETH_REPRESENTOR_NONE) {
5979                                 RTE_ETHDEV_LOG(ERR, "duplicate representor key: %s\n",
5980                                         dargs);
5981                                 result = -1;
5982                                 goto parse_cleanup;
5983                         }
5984                         result = rte_eth_devargs_parse_representor_ports(
5985                                         pair->value, eth_da);
5986                         if (result < 0)
5987                                 goto parse_cleanup;
5988                 }
5989         }
5990
5991 parse_cleanup:
5992         free(args.str);
5994
5995         return result;
5996 }
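/*
 * Illustrative only: for a devargs suffix such as "representor=[0-3]",
 * the loop above hands "[0-3]" to
 * rte_eth_devargs_parse_representor_ports(), which expands the range
 * into the representor list in eth_da.
 */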
5997
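/*
 * Worked example for the range lookup below (values illustrative): a
 * PMD reporting one range {type = RTE_ETH_REPRESENTOR_VF, pf = 0,
 * vf = 0, id_base = 16, id_end = 19} has count = 19 - 16 + 1 = 4, so
 * representor_port 2 on pf 0 maps to repr_id = 16 + (2 - 0) = 18.
 */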
5998 int
5999 rte_eth_representor_id_get(const struct rte_eth_dev *ethdev,
6000                            enum rte_eth_representor_type type,
6001                            int controller, int pf, int representor_port,
6002                            uint16_t *repr_id)
6003 {
6004         int ret, n, count;
6005         uint32_t i;
6006         struct rte_eth_representor_info *info = NULL;
6007         size_t size;
6008
6009         if (type == RTE_ETH_REPRESENTOR_NONE)
6010                 return 0;
6011         if (repr_id == NULL)
6012                 return -EINVAL;
6013
6014         /* Get PMD representor range info. */
6015         ret = rte_eth_representor_info_get(ethdev->data->port_id, NULL);
6016         if (ret == -ENOTSUP && type == RTE_ETH_REPRESENTOR_VF &&
6017             controller == -1 && pf == -1) {
6018                 /* Direct mapping for legacy VF representor. */
6019                 *repr_id = representor_port;
6020                 return 0;
6021         } else if (ret < 0) {
6022                 return ret;
6023         }
6024         n = ret;
6025         size = sizeof(*info) + n * sizeof(info->ranges[0]);
6026         info = calloc(1, size);
6027         if (info == NULL)
6028                 return -ENOMEM;
6029         info->nb_ranges_alloc = n;
6030         ret = rte_eth_representor_info_get(ethdev->data->port_id, info);
6031         if (ret < 0)
6032                 goto out;
6033
6034         /* Default controller and pf to the backing device's values. */
6035         if (controller == -1)
6036                 controller = info->controller;
6037         if (pf == -1)
6038                 pf = info->pf;
6039
6040         /* Locate representor ID. */
6041         ret = -ENOENT;
6042         for (i = 0; i < info->nb_ranges; ++i) {
6043                 if (info->ranges[i].type != type)
6044                         continue;
6045                 if (info->ranges[i].controller != controller)
6046                         continue;
6047                 if (info->ranges[i].id_end < info->ranges[i].id_base) {
6048                         RTE_ETHDEV_LOG(WARNING,
6049                                 "Port %hu invalid representor ID range %u - %u, entry %d\n",
6050                                 ethdev->data->port_id, info->ranges[i].id_base,
6051                                 info->ranges[i].id_end, i);
6052                         continue;
6053                 }
6054                 count = info->ranges[i].id_end - info->ranges[i].id_base + 1;
6055                 switch (info->ranges[i].type) {
6056                 case RTE_ETH_REPRESENTOR_PF:
6057                         if (pf < info->ranges[i].pf ||
6058                             pf >= info->ranges[i].pf + count)
6059                                 continue;
6060                         *repr_id = info->ranges[i].id_base +
6061                                    (pf - info->ranges[i].pf);
6062                         ret = 0;
6063                         goto out;
6064                 case RTE_ETH_REPRESENTOR_VF:
6065                         if (info->ranges[i].pf != pf)
6066                                 continue;
6067                         if (representor_port < info->ranges[i].vf ||
6068                             representor_port >= info->ranges[i].vf + count)
6069                                 continue;
6070                         *repr_id = info->ranges[i].id_base +
6071                                    (representor_port - info->ranges[i].vf);
6072                         ret = 0;
6073                         goto out;
6074                 case RTE_ETH_REPRESENTOR_SF:
6075                         if (info->ranges[i].pf != pf)
6076                                 continue;
6077                         if (representor_port < info->ranges[i].sf ||
6078                             representor_port >= info->ranges[i].sf + count)
6079                                 continue;
6080                         *repr_id = info->ranges[i].id_base +
6081                                    (representor_port - info->ranges[i].sf);
6082                         ret = 0;
6083                         goto out;
6084                 default:
6085                         break;
6086                 }
6087         }
6088 out:
6089         free(info);
6090         return ret;
6091 }
6092
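/*
 * Telemetry callbacks, registered in ethdev_init_telemetry() at the
 * end of this file. Illustrative session via
 * usertools/dpdk-telemetry.py (output abridged):
 *
 *	--> /ethdev/list
 *	{"/ethdev/list": [0, 1]}
 *	--> /ethdev/stats,0
 *	{"/ethdev/stats": {"ipackets": 0, "opackets": 0, ...}}
 */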
6093 static int
6094 eth_dev_handle_port_list(const char *cmd __rte_unused,
6095                 const char *params __rte_unused,
6096                 struct rte_tel_data *d)
6097 {
6098         int port_id;
6099
6100         rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
6101         RTE_ETH_FOREACH_DEV(port_id)
6102                 rte_tel_data_add_array_int(d, port_id);
6103         return 0;
6104 }
6105
6106 static void
6107 eth_dev_add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats,
6108                 const char *stat_name)
6109 {
6110         int q;
6111         struct rte_tel_data *q_data = rte_tel_data_alloc();

        /* Skip this stat rather than dereference NULL on alloc failure. */
        if (q_data == NULL)
                return;

6112         rte_tel_data_start_array(q_data, RTE_TEL_U64_VAL);
6113         for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++)
6114                 rte_tel_data_add_array_u64(q_data, q_stats[q]);
6115         rte_tel_data_add_dict_container(d, stat_name, q_data, 0);
6116 }
6117
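/* Add one basic stat to the dict, keyed by its rte_eth_stats field name. */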
6118 #define ADD_DICT_STAT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s)
6119
6120 static int
6121 eth_dev_handle_port_stats(const char *cmd __rte_unused,
6122                 const char *params,
6123                 struct rte_tel_data *d)
6124 {
6125         struct rte_eth_stats stats;
6126         int port_id, ret;
6127
6128         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
6129                 return -1;
6130
6131         port_id = atoi(params);
6132         if (!rte_eth_dev_is_valid_port(port_id))
6133                 return -1;
6134
6135         ret = rte_eth_stats_get(port_id, &stats);
6136         if (ret < 0)
6137                 return -1;
6138
6139         rte_tel_data_start_dict(d);
6140         ADD_DICT_STAT(stats, ipackets);
6141         ADD_DICT_STAT(stats, opackets);
6142         ADD_DICT_STAT(stats, ibytes);
6143         ADD_DICT_STAT(stats, obytes);
6144         ADD_DICT_STAT(stats, imissed);
6145         ADD_DICT_STAT(stats, ierrors);
6146         ADD_DICT_STAT(stats, oerrors);
6147         ADD_DICT_STAT(stats, rx_nombuf);
6148         eth_dev_add_port_queue_stats(d, stats.q_ipackets, "q_ipackets");
6149         eth_dev_add_port_queue_stats(d, stats.q_opackets, "q_opackets");
6150         eth_dev_add_port_queue_stats(d, stats.q_ibytes, "q_ibytes");
6151         eth_dev_add_port_queue_stats(d, stats.q_obytes, "q_obytes");
6152         eth_dev_add_port_queue_stats(d, stats.q_errors, "q_errors");
6153
6154         return 0;
6155 }
6156
6157 static int
6158 eth_dev_handle_port_xstats(const char *cmd __rte_unused,
6159                 const char *params,
6160                 struct rte_tel_data *d)
6161 {
6162         struct rte_eth_xstat *eth_xstats;
6163         struct rte_eth_xstat_name *xstat_names;
6164         int port_id, num_xstats;
6165         int i, ret;
6166         char *end_param;
6167
6168         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
6169                 return -1;
6170
6171         port_id = strtoul(params, &end_param, 0);
6172         if (*end_param != '\0')
6173                 RTE_ETHDEV_LOG(NOTICE,
6174                         "Extra parameters passed to ethdev telemetry command, ignoring\n");
6175         if (!rte_eth_dev_is_valid_port(port_id))
6176                 return -1;
6177
6178         num_xstats = rte_eth_xstats_get(port_id, NULL, 0);
6179         if (num_xstats < 0)
6180                 return -1;
6181
6182         /* use one malloc for both names and stats */
6183         eth_xstats = malloc((sizeof(struct rte_eth_xstat) +
6184                         sizeof(struct rte_eth_xstat_name)) * num_xstats);
6185         if (eth_xstats == NULL)
6186                 return -1;
6187         xstat_names = (void *)&eth_xstats[num_xstats];
6188
6189         ret = rte_eth_xstats_get_names(port_id, xstat_names, num_xstats);
6190         if (ret < 0 || ret > num_xstats) {
6191                 free(eth_xstats);
6192                 return -1;
6193         }
6194
6195         ret = rte_eth_xstats_get(port_id, eth_xstats, num_xstats);
6196         if (ret < 0 || ret > num_xstats) {
6197                 free(eth_xstats);
6198                 return -1;
6199         }
6200
6201         rte_tel_data_start_dict(d);
6202         for (i = 0; i < num_xstats; i++)
6203                 rte_tel_data_add_dict_u64(d, xstat_names[i].name,
6204                                 eth_xstats[i].value);
        free(eth_xstats);
6205         return 0;
6206 }
6207
6208 static int
6209 eth_dev_handle_port_link_status(const char *cmd __rte_unused,
6210                 const char *params,
6211                 struct rte_tel_data *d)
6212 {
6213         static const char *status_str = "status";
6214         int ret, port_id;
6215         struct rte_eth_link link;
6216         char *end_param;
6217
6218         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
6219                 return -1;
6220
6221         port_id = strtoul(params, &end_param, 0);
6222         if (*end_param != '\0')
6223                 RTE_ETHDEV_LOG(NOTICE,
6224                         "Extra parameters passed to ethdev telemetry command, ignoring\n");
6225         if (!rte_eth_dev_is_valid_port(port_id))
6226                 return -1;
6227
6228         ret = rte_eth_link_get_nowait(port_id, &link);
6229         if (ret < 0)
6230                 return -1;
6231
6232         rte_tel_data_start_dict(d);
6233         if (!link.link_status) {
6234                 rte_tel_data_add_dict_string(d, status_str, "DOWN");
6235                 return 0;
6236         }
6237         rte_tel_data_add_dict_string(d, status_str, "UP");
6238         rte_tel_data_add_dict_u64(d, "speed", link.link_speed);
6239         rte_tel_data_add_dict_string(d, "duplex",
6240                         (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
6241                                 "full-duplex" : "half-duplex");
6242         return 0;
6243 }
6244
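/*
 * Hairpin peer-queue helpers: thin wrappers used while binding or
 * unbinding a pair of ports (see rte_eth_hairpin_bind()/_unbind()).
 * Port validity is intentionally not re-checked here because the
 * public entry points have already validated both port IDs.
 */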
6245 int
6246 rte_eth_hairpin_queue_peer_update(uint16_t peer_port, uint16_t peer_queue,
6247                                   struct rte_hairpin_peer_info *cur_info,
6248                                   struct rte_hairpin_peer_info *peer_info,
6249                                   uint32_t direction)
6250 {
6251         struct rte_eth_dev *dev;
6252
6253         /* Current queue info (cur_info) is optional; peer info is mandatory. */
6254         if (peer_info == NULL)
6255                 return -EINVAL;
6256
6257         /* No need to check the validity again. */
6258         dev = &rte_eth_devices[peer_port];
6259         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_update,
6260                                 -ENOTSUP);
6261
6262         return (*dev->dev_ops->hairpin_queue_peer_update)(dev, peer_queue,
6263                                         cur_info, peer_info, direction);
6264 }
6265
6266 int
6267 rte_eth_hairpin_queue_peer_bind(uint16_t cur_port, uint16_t cur_queue,
6268                                 struct rte_hairpin_peer_info *peer_info,
6269                                 uint32_t direction)
6270 {
6271         struct rte_eth_dev *dev;
6272
6273         if (peer_info == NULL)
6274                 return -EINVAL;
6275
6276         /* No need to check the validity again. */
6277         dev = &rte_eth_devices[cur_port];
6278         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_bind,
6279                                 -ENOTSUP);
6280
6281         return (*dev->dev_ops->hairpin_queue_peer_bind)(dev, cur_queue,
6282                                                         peer_info, direction);
6283 }
6284
6285 int
6286 rte_eth_hairpin_queue_peer_unbind(uint16_t cur_port, uint16_t cur_queue,
6287                                   uint32_t direction)
6288 {
6289         struct rte_eth_dev *dev;
6290
6291         /* No need to check the validity again. */
6292         dev = &rte_eth_devices[cur_port];
6293         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_unbind,
6294                                 -ENOTSUP);
6295
6296         return (*dev->dev_ops->hairpin_queue_peer_unbind)(dev, cur_queue,
6297                                                           direction);
6298 }
6299
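/*
 * Standard two-call pattern: when info is NULL the PMD returns only
 * the number of representor ranges, letting callers size a buffer
 * before the second call (see rte_eth_representor_id_get() above).
 */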
6300 int
6301 rte_eth_representor_info_get(uint16_t port_id,
6302                              struct rte_eth_representor_info *info)
6303 {
6304         struct rte_eth_dev *dev;
6305
6306         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6307         dev = &rte_eth_devices[port_id];
6308
6309         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->representor_info_get, -ENOTSUP);
6310         return eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev, info));
6311 }
6312
6313 RTE_LOG_REGISTER_DEFAULT(rte_eth_dev_logtype, INFO);
6314
6315 RTE_INIT(ethdev_init_telemetry)
6316 {
6317         rte_telemetry_register_cmd("/ethdev/list", eth_dev_handle_port_list,
6318                         "Returns list of available ethdev ports. Takes no parameters");
6319         rte_telemetry_register_cmd("/ethdev/stats", eth_dev_handle_port_stats,
6320                         "Returns the common stats for a port. Parameters: int port_id");
6321         rte_telemetry_register_cmd("/ethdev/xstats", eth_dev_handle_port_xstats,
6322                         "Returns the extended stats for a port. Parameters: int port_id");
6323         rte_telemetry_register_cmd("/ethdev/link_status",
6324                         eth_dev_handle_port_link_status,
6325                         "Returns the link status for a port. Parameters: int port_id");
6326 }