ethdev: negotiate delivery of packet metadata from HW to PMD
[dpdk.git] lib/ethdev/rte_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <ctype.h>
#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_kvargs.h>
#include <rte_class.h>
#include <rte_ether.h>
#include <rte_telemetry.h>

#include "rte_ethdev_trace.h"
#include "rte_ethdev.h"
#include "ethdev_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* spinlock for eth device callbacks */
static rte_spinlock_t eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove rx callbacks */
static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove tx callbacks */
static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for shared data allocation */
static rte_spinlock_t eth_dev_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* Store statistics names and their offsets in the stats structure. */
struct rte_eth_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

/* Shared memory between primary and secondary processes. */
static struct {
        uint64_t next_owner_id;
        rte_spinlock_t ownership_lock;
        struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
} *eth_dev_shared_data;

static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = {
        {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
        {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
        {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
        {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
        {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
        {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
        {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
        {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
                rx_nombuf)},
};

#define RTE_NB_STATS RTE_DIM(eth_dev_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_rxq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
        {"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS RTE_DIM(eth_dev_rxq_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_opackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)

#define RTE_RX_OFFLOAD_BIT2STR(_name)   \
        { DEV_RX_OFFLOAD_##_name, #_name }

#define RTE_ETH_RX_OFFLOAD_BIT2STR(_name)       \
        { RTE_ETH_RX_OFFLOAD_##_name, #_name }

static const struct {
        uint64_t offload;
        const char *name;
} eth_dev_rx_offload_names[] = {
        RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
        RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
        RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
        RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
        RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
        RTE_RX_OFFLOAD_BIT2STR(SCATTER),
        RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
        RTE_RX_OFFLOAD_BIT2STR(SECURITY),
        RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
        RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
        RTE_ETH_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
};

#undef RTE_RX_OFFLOAD_BIT2STR
#undef RTE_ETH_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name)   \
        { DEV_TX_OFFLOAD_##_name, #_name }

static const struct {
        uint64_t offload;
        const char *name;
} eth_dev_tx_offload_names[] = {
        RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
        RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
        RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
        RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
        RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
        RTE_TX_OFFLOAD_BIT2STR(SECURITY),
        RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
};

#undef RTE_TX_OFFLOAD_BIT2STR

/**
 * The user application callback description.
 *
 * It contains callback address to be registered by user application,
 * the pointer to the parameters for callback, and the event type.
 */
struct rte_eth_dev_callback {
        TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
        rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
        void *cb_arg;                           /**< Parameter for callback */
        void *ret_param;                        /**< Return parameter */
        enum rte_eth_event_type event;          /**< Interrupt event type */
        uint32_t active;                        /**< Callback is executing */
};

enum {
        STAT_QMAP_TX = 0,
        STAT_QMAP_RX
};

int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
        int ret;
        struct rte_devargs devargs;
        const char *bus_param_key;
        char *bus_str = NULL;
        char *cls_str = NULL;
        int str_size;

        if (iter == NULL) {
                RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL iterator\n");
                return -EINVAL;
        }

        if (devargs_str == NULL) {
                RTE_ETHDEV_LOG(ERR,
                        "Cannot initialize iterator from NULL device description string\n");
                return -EINVAL;
        }

        memset(iter, 0, sizeof(*iter));
        memset(&devargs, 0, sizeof(devargs));

        /*
         * The devargs string may use various syntaxes:
         *   - 0000:08:00.0,representor=[1-3]
         *   - pci:0000:06:00.0,representor=[0,5]
         *   - class=eth,mac=00:11:22:33:44:55
         *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
         */

        /*
         * Handle pure class filter (i.e. without any bus-level argument),
         * from future new syntax.
         * rte_devargs_parse() does not yet support the new syntax,
         * which is why this simple case is temporarily parsed here.
         */
#define iter_anybus_str "class=eth,"
        if (strncmp(devargs_str, iter_anybus_str,
                        strlen(iter_anybus_str)) == 0) {
                iter->cls_str = devargs_str + strlen(iter_anybus_str);
                goto end;
        }

        /* Split bus, device and parameters. */
        ret = rte_devargs_parse(&devargs, devargs_str);
        if (ret != 0)
                goto error;

        /*
         * Assume parameters of old syntax can match only at ethdev level.
         * Extra parameters will be ignored, thanks to "+" prefix.
         */
        str_size = strlen(devargs.args) + 2;
        cls_str = malloc(str_size);
        if (cls_str == NULL) {
                ret = -ENOMEM;
                goto error;
        }
        ret = snprintf(cls_str, str_size, "+%s", devargs.args);
        if (ret != str_size - 1) {
                ret = -EINVAL;
                goto error;
        }
        iter->cls_str = cls_str;

        iter->bus = devargs.bus;
        if (iter->bus->dev_iterate == NULL) {
                ret = -ENOTSUP;
                goto error;
        }

        /* Convert bus args to new syntax for use with new API dev_iterate. */
        if ((strcmp(iter->bus->name, "vdev") == 0) ||
                (strcmp(iter->bus->name, "fslmc") == 0) ||
                (strcmp(iter->bus->name, "dpaa_bus") == 0)) {
                bus_param_key = "name";
        } else if (strcmp(iter->bus->name, "pci") == 0) {
                bus_param_key = "addr";
        } else {
                ret = -ENOTSUP;
                goto error;
        }
        str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
        bus_str = malloc(str_size);
        if (bus_str == NULL) {
                ret = -ENOMEM;
                goto error;
        }
        ret = snprintf(bus_str, str_size, "%s=%s",
                        bus_param_key, devargs.name);
        if (ret != str_size - 1) {
                ret = -EINVAL;
                goto error;
        }
        iter->bus_str = bus_str;

end:
        iter->cls = rte_class_find_by_name("eth");
        rte_devargs_reset(&devargs);
        return 0;

error:
        if (ret == -ENOTSUP)
                RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n",
                                iter->bus->name);
        rte_devargs_reset(&devargs);
        free(bus_str);
        free(cls_str);
        return ret;
}

uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
        if (iter == NULL) {
                RTE_ETHDEV_LOG(ERR,
                        "Cannot get next device from NULL iterator\n");
                return RTE_MAX_ETHPORTS;
        }

        if (iter->cls == NULL) /* invalid ethdev iterator */
                return RTE_MAX_ETHPORTS;

        do { /* loop to try all matching rte_device */
                /* If not pure ethdev filter and */
                if (iter->bus != NULL &&
                                /* not in middle of rte_eth_dev iteration, */
                                iter->class_device == NULL) {
                        /* get next rte_device to try. */
                        iter->device = iter->bus->dev_iterate(
                                        iter->device, iter->bus_str, iter);
                        if (iter->device == NULL)
                                break; /* no more rte_device candidate */
                }
                /* A device is matching bus part, need to check ethdev part. */
                iter->class_device = iter->cls->dev_iterate(
                                iter->class_device, iter->cls_str, iter);
                if (iter->class_device != NULL)
                        return eth_dev_to_id(iter->class_device); /* match */
        } while (iter->bus != NULL); /* need to try next rte_device */

        /* No more ethdev port to iterate. */
        rte_eth_iterator_cleanup(iter);
        return RTE_MAX_ETHPORTS;
}

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
        if (iter == NULL) {
                RTE_ETHDEV_LOG(ERR, "Cannot do clean up from NULL iterator\n");
                return;
        }

        if (iter->bus_str == NULL)
                return; /* nothing to free in pure class filter */
        free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
        free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
        memset(iter, 0, sizeof(*iter));
}
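
/*
 * Usage sketch for the iterator trio above (illustrative, not part of
 * this file; error handling elided and the devargs string is a made-up
 * example). Note that rte_eth_iterator_next() cleans the iterator up
 * by itself once the iteration is exhausted:
 *
 *        struct rte_dev_iterator iter;
 *        uint16_t port_id;
 *
 *        if (rte_eth_iterator_init(&iter, "class=eth,mac=00:11:22:33:44:55") == 0)
 *                for (port_id = rte_eth_iterator_next(&iter);
 *                     port_id != RTE_MAX_ETHPORTS;
 *                     port_id = rte_eth_iterator_next(&iter))
 *                        printf("matched port %u\n", port_id);
 */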

uint16_t
rte_eth_find_next(uint16_t port_id)
{
        while (port_id < RTE_MAX_ETHPORTS &&
                        rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
                port_id++;

        if (port_id >= RTE_MAX_ETHPORTS)
                return RTE_MAX_ETHPORTS;

        return port_id;
}

/*
 * Macro to iterate over all valid ports for internal usage.
 * Note: RTE_ETH_FOREACH_DEV is different because it filters out owned ports.
 */
#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
        for (port_id = rte_eth_find_next(0); \
             port_id < RTE_MAX_ETHPORTS; \
             port_id = rte_eth_find_next(port_id + 1))

uint16_t
rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
{
        port_id = rte_eth_find_next(port_id);
        while (port_id < RTE_MAX_ETHPORTS &&
                        rte_eth_devices[port_id].device != parent)
                port_id = rte_eth_find_next(port_id + 1);

        return port_id;
}

uint16_t
rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
        return rte_eth_find_next_of(port_id,
                        rte_eth_devices[ref_port_id].device);
}

static void
eth_dev_shared_data_prepare(void)
{
        const unsigned flags = 0;
        const struct rte_memzone *mz;

        rte_spinlock_lock(&eth_dev_shared_data_lock);

        if (eth_dev_shared_data == NULL) {
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        /* Allocate port data and ownership shared memory. */
                        mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
                                        sizeof(*eth_dev_shared_data),
                                        rte_socket_id(), flags);
                } else
                        mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
                if (mz == NULL)
                        rte_panic("Cannot allocate ethdev shared data\n");

                eth_dev_shared_data = mz->addr;
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        eth_dev_shared_data->next_owner_id =
                                        RTE_ETH_DEV_NO_OWNER + 1;
                        rte_spinlock_init(&eth_dev_shared_data->ownership_lock);
                        memset(eth_dev_shared_data->data, 0,
                               sizeof(eth_dev_shared_data->data));
                }
        }

        rte_spinlock_unlock(&eth_dev_shared_data_lock);
}

static bool
eth_dev_is_allocated(const struct rte_eth_dev *ethdev)
{
        return ethdev->data->name[0] != '\0';
}

static struct rte_eth_dev *
eth_dev_allocated(const char *name)
{
        uint16_t i;

        RTE_BUILD_BUG_ON(RTE_MAX_ETHPORTS >= UINT16_MAX);

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (rte_eth_devices[i].data != NULL &&
                    strcmp(rte_eth_devices[i].data->name, name) == 0)
                        return &rte_eth_devices[i];
        }
        return NULL;
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
        struct rte_eth_dev *ethdev;

        eth_dev_shared_data_prepare();

        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        ethdev = eth_dev_allocated(name);

        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

        return ethdev;
}

static uint16_t
eth_dev_find_free_port(void)
{
        uint16_t i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                /* Using shared name field to find a free port. */
                if (eth_dev_shared_data->data[i].name[0] == '\0') {
                        RTE_ASSERT(rte_eth_devices[i].state ==
                                   RTE_ETH_DEV_UNUSED);
                        return i;
                }
        }
        return RTE_MAX_ETHPORTS;
}

static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
{
        struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

        eth_dev->data = &eth_dev_shared_data->data[port_id];

        return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
        uint16_t port_id;
        struct rte_eth_dev *eth_dev = NULL;
        size_t name_len;

        name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
        if (name_len == 0) {
                RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n");
                return NULL;
        }

        if (name_len >= RTE_ETH_NAME_MAX_LEN) {
                RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n");
                return NULL;
        }

        eth_dev_shared_data_prepare();

        /* Synchronize port creation between primary and secondary threads. */
        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        if (eth_dev_allocated(name) != NULL) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethernet device with name %s already allocated\n",
                        name);
                goto unlock;
        }

        port_id = eth_dev_find_free_port();
        if (port_id == RTE_MAX_ETHPORTS) {
                RTE_ETHDEV_LOG(ERR,
                        "Reached maximum number of Ethernet ports\n");
                goto unlock;
        }

        eth_dev = eth_dev_get(port_id);
        strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
        eth_dev->data->port_id = port_id;
        eth_dev->data->backer_port_id = RTE_MAX_ETHPORTS;
        eth_dev->data->mtu = RTE_ETHER_MTU;
        pthread_mutex_init(&eth_dev->data->flow_ops_mutex, NULL);

unlock:
        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

        return eth_dev;
}
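
/*
 * Minimal sketch of how a PMD typically uses rte_eth_dev_allocate()
 * during probe (illustrative only; "example_priv" and "example_dev_ops"
 * are hypothetical names, and rte_eth_dev_probing_finish() is assumed
 * to be available from ethdev_driver.h):
 *
 *        struct rte_eth_dev *eth_dev = rte_eth_dev_allocate("net_example0");
 *
 *        if (eth_dev == NULL)
 *                return -ENOMEM;
 *        eth_dev->data->dev_private = rte_zmalloc("example_priv",
 *                        sizeof(struct example_priv), RTE_CACHE_LINE_SIZE);
 *        eth_dev->dev_ops = &example_dev_ops;
 *        rte_eth_dev_probing_finish(eth_dev);
 *
 * A secondary process would instead call rte_eth_dev_attach_secondary()
 * (below) with the same name to reuse the port ID chosen by the primary.
 */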

/*
 * Attach to a port already registered by the primary process, which
 * ensures that the same device has the same port ID in both the
 * primary and secondary processes.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
        uint16_t i;
        struct rte_eth_dev *eth_dev = NULL;

        eth_dev_shared_data_prepare();

        /* Synchronize port attachment to primary port creation and release. */
        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (strcmp(eth_dev_shared_data->data[i].name, name) == 0)
                        break;
        }
        if (i == RTE_MAX_ETHPORTS) {
                RTE_ETHDEV_LOG(ERR,
                        "Device %s is not driven by the primary process\n",
                        name);
        } else {
                eth_dev = eth_dev_get(i);
                RTE_ASSERT(eth_dev->data->port_id == i);
        }

        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
        return eth_dev;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
        if (eth_dev == NULL)
                return -EINVAL;

        eth_dev_shared_data_prepare();

        if (eth_dev->state != RTE_ETH_DEV_UNUSED)
                rte_eth_dev_callback_process(eth_dev,
                                RTE_ETH_EVENT_DESTROY, NULL);

        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        eth_dev->state = RTE_ETH_DEV_UNUSED;
        eth_dev->device = NULL;
        eth_dev->process_private = NULL;
        eth_dev->intr_handle = NULL;
        eth_dev->rx_pkt_burst = NULL;
        eth_dev->tx_pkt_burst = NULL;
        eth_dev->tx_pkt_prepare = NULL;
        eth_dev->rx_queue_count = NULL;
        eth_dev->rx_descriptor_status = NULL;
        eth_dev->tx_descriptor_status = NULL;
        eth_dev->dev_ops = NULL;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                rte_free(eth_dev->data->rx_queues);
                rte_free(eth_dev->data->tx_queues);
                rte_free(eth_dev->data->mac_addrs);
                rte_free(eth_dev->data->hash_mac_addrs);
                rte_free(eth_dev->data->dev_private);
                pthread_mutex_destroy(&eth_dev->data->flow_ops_mutex);
                memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
        }

        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

        return 0;
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
        if (port_id >= RTE_MAX_ETHPORTS ||
            (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
                return 0;
        else
                return 1;
}

static int
eth_is_valid_owner_id(uint64_t owner_id)
{
        if (owner_id == RTE_ETH_DEV_NO_OWNER ||
            eth_dev_shared_data->next_owner_id <= owner_id)
                return 0;
        return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
        port_id = rte_eth_find_next(port_id);
        while (port_id < RTE_MAX_ETHPORTS &&
                        rte_eth_devices[port_id].data->owner.id != owner_id)
                port_id = rte_eth_find_next(port_id + 1);

        return port_id;
}

int
rte_eth_dev_owner_new(uint64_t *owner_id)
{
        if (owner_id == NULL) {
                RTE_ETHDEV_LOG(ERR, "Cannot get new owner ID to NULL\n");
                return -EINVAL;
        }

        eth_dev_shared_data_prepare();

        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        *owner_id = eth_dev_shared_data->next_owner_id++;

        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
        return 0;
}

static int
eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
                       const struct rte_eth_dev_owner *new_owner)
{
        struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
        struct rte_eth_dev_owner *port_owner;

        if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
                RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
                        port_id);
                return -ENODEV;
        }

        if (new_owner == NULL) {
                RTE_ETHDEV_LOG(ERR,
                        "Cannot set ethdev port %u owner from NULL owner\n",
                        port_id);
                return -EINVAL;
        }

        if (!eth_is_valid_owner_id(new_owner->id) &&
            !eth_is_valid_owner_id(old_owner_id)) {
                RTE_ETHDEV_LOG(ERR,
                        "Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
                       old_owner_id, new_owner->id);
                return -EINVAL;
        }

        port_owner = &rte_eth_devices[port_id].data->owner;
        if (port_owner->id != old_owner_id) {
                RTE_ETHDEV_LOG(ERR,
                        "Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
                        port_id, port_owner->name, port_owner->id);
                return -EPERM;
        }

        /* can not truncate (same structure) */
        strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);

        port_owner->id = new_owner->id;

        RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
                port_id, new_owner->name, new_owner->id);

        return 0;
}

int
rte_eth_dev_owner_set(const uint16_t port_id,
                      const struct rte_eth_dev_owner *owner)
{
        int ret;

        eth_dev_shared_data_prepare();

        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
        return ret;
}

int
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
        const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
                        {.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
        int ret;

        eth_dev_shared_data_prepare();

        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        ret = eth_dev_owner_set(port_id, owner_id, &new_owner);

        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
        return ret;
}

int
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
        uint16_t port_id;
        int ret = 0;

        eth_dev_shared_data_prepare();

        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        if (eth_is_valid_owner_id(owner_id)) {
                for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
                        if (rte_eth_devices[port_id].data->owner.id == owner_id)
                                memset(&rte_eth_devices[port_id].data->owner, 0,
                                       sizeof(struct rte_eth_dev_owner));
                RTE_ETHDEV_LOG(NOTICE,
                        "All port owners owned by %016"PRIx64" identifier have been removed\n",
                        owner_id);
        } else {
                RTE_ETHDEV_LOG(ERR,
                               "Invalid owner id=%016"PRIx64"\n",
                               owner_id);
                ret = -EINVAL;
        }

        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

        return ret;
}

int
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
        struct rte_eth_dev *ethdev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        ethdev = &rte_eth_devices[port_id];

        if (!eth_dev_is_allocated(ethdev)) {
                RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
                        port_id);
                return -ENODEV;
        }

        if (owner == NULL) {
                RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u owner to NULL\n",
                        port_id);
                return -EINVAL;
        }

        eth_dev_shared_data_prepare();

        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
        rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

        return 0;
}
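
/*
 * Ownership API usage sketch (illustrative; error handling elided and
 * "port_to_own" is a placeholder for a valid, unowned port ID): claim a
 * port so that other subsystems skip it, then walk only the ports owned
 * by this identifier.
 *
 *        struct rte_eth_dev_owner owner = { .name = "my_app" };
 *        uint16_t port_id;
 *
 *        rte_eth_dev_owner_new(&owner.id);
 *        rte_eth_dev_owner_set(port_to_own, &owner);
 *        for (port_id = rte_eth_find_next_owned_by(0, owner.id);
 *             port_id < RTE_MAX_ETHPORTS;
 *             port_id = rte_eth_find_next_owned_by(port_id + 1, owner.id))
 *                ...
 */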

int
rte_eth_dev_socket_id(uint16_t port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
        return rte_eth_devices[port_id].data->numa_node;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
        return rte_eth_devices[port_id].security_ctx;
}

uint16_t
rte_eth_dev_count_avail(void)
{
        uint16_t p;
        uint16_t count;

        count = 0;

        RTE_ETH_FOREACH_DEV(p)
                count++;

        return count;
}

uint16_t
rte_eth_dev_count_total(void)
{
        uint16_t port, count = 0;

        RTE_ETH_FOREACH_VALID_DEV(port)
                count++;

        return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
        char *tmp;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        if (name == NULL) {
                RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u name to NULL\n",
                        port_id);
                return -EINVAL;
        }

        /* shouldn't check 'rte_eth_devices[i].data',
         * because it might be overwritten by VDEV PMD */
        tmp = eth_dev_shared_data->data[port_id].name;
        strcpy(name, tmp);
        return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
        uint16_t pid;

        if (name == NULL) {
                RTE_ETHDEV_LOG(ERR, "Cannot get port ID from NULL name\n");
                return -EINVAL;
        }

        if (port_id == NULL) {
                RTE_ETHDEV_LOG(ERR,
                        "Cannot get port ID to NULL for %s\n", name);
                return -EINVAL;
        }

        RTE_ETH_FOREACH_VALID_DEV(pid)
                if (!strcmp(name, eth_dev_shared_data->data[pid].name)) {
                        *port_id = pid;
                        return 0;
                }

        return -ENODEV;
}
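
/*
 * The two lookups above are inverses of each other; a quick sketch
 * (illustrative, assuming port 0 is a valid port):
 *
 *        char name[RTE_ETH_NAME_MAX_LEN];
 *        uint16_t port_id;
 *
 *        if (rte_eth_dev_get_name_by_port(0, name) == 0 &&
 *            rte_eth_dev_get_port_by_name(name, &port_id) == 0)
 *                RTE_ASSERT(port_id == 0);
 */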

static int
eth_err(uint16_t port_id, int ret)
{
        if (ret == 0)
                return 0;
        if (rte_eth_dev_is_removed(port_id))
                return -EIO;
        return ret;
}

static void
eth_dev_rxq_release(struct rte_eth_dev *dev, uint16_t qid)
{
        void **rxq = dev->data->rx_queues;

        if (rxq[qid] == NULL)
                return;

        if (dev->dev_ops->rx_queue_release != NULL)
                (*dev->dev_ops->rx_queue_release)(dev, qid);
        rxq[qid] = NULL;
}

static void
eth_dev_txq_release(struct rte_eth_dev *dev, uint16_t qid)
{
        void **txq = dev->data->tx_queues;

        if (txq[qid] == NULL)
                return;

        if (dev->dev_ops->tx_queue_release != NULL)
                (*dev->dev_ops->tx_queue_release)(dev, qid);
        txq[qid] = NULL;
}

static int
eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_rx_queues;
        void **rxq;
        unsigned i;

        if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
                                sizeof(dev->data->rx_queues[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (dev->data->rx_queues == NULL) {
                        dev->data->nb_rx_queues = 0;
                        return -(ENOMEM);
                }
        } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
                for (i = nb_queues; i < old_nb_queues; i++)
                        eth_dev_rxq_release(dev, i);

                rxq = dev->data->rx_queues;
                rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (rxq == NULL)
                        return -(ENOMEM);
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(rxq + old_nb_queues, 0,
                                sizeof(rxq[0]) * new_qs);
                }

                dev->data->rx_queues = rxq;

        } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
                for (i = nb_queues; i < old_nb_queues; i++)
                        eth_dev_rxq_release(dev, i);

                rte_free(dev->data->rx_queues);
                dev->data->rx_queues = NULL;
        }
        dev->data->nb_rx_queues = nb_queues;
        return 0;
}

static int
eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        uint16_t port_id;

        if (rx_queue_id >= dev->data->nb_rx_queues) {
                port_id = dev->data->port_id;
                RTE_ETHDEV_LOG(ERR,
                               "Invalid Rx queue_id=%u of device with port_id=%u\n",
                               rx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->rx_queues[rx_queue_id] == NULL) {
                port_id = dev->data->port_id;
                RTE_ETHDEV_LOG(ERR,
                               "Queue %u of device with port_id=%u has not been setup\n",
                               rx_queue_id, port_id);
                return -EINVAL;
        }

        return 0;
}

static int
eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
        uint16_t port_id;

        if (tx_queue_id >= dev->data->nb_tx_queues) {
                port_id = dev->data->port_id;
                RTE_ETHDEV_LOG(ERR,
                               "Invalid Tx queue_id=%u of device with port_id=%u\n",
                               tx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->tx_queues[tx_queue_id] == NULL) {
                port_id = dev->data->port_id;
                RTE_ETHDEV_LOG(ERR,
                               "Queue %u of device with port_id=%u has not been setup\n",
                               tx_queue_id, port_id);
                return -EINVAL;
        }

        return 0;
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        if (!dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be started before starting any queue\n",
                        port_id);
                return -EINVAL;
        }

        ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
        if (ret != 0)
                return ret;

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

        if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        rx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->rx_queue_start(dev, rx_queue_id));
}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
        if (ret != 0)
                return ret;

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

        if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        rx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
}

int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        if (!dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be started before starting any queue\n",
                        port_id);
                return -EINVAL;
        }

        ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
        if (ret != 0)
                return ret;

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

        if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        tx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
        if (ret != 0)
                return ret;

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

        if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        tx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
}
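
/*
 * The four per-queue start/stop functions above pair naturally with
 * deferred start: a queue configured with the rx_deferred_start (or
 * tx_deferred_start) flag in its queue configuration is not started by
 * rte_eth_dev_start() and must be started explicitly. Illustrative
 * sketch, assuming the port and its queue 0 are already set up and
 * error handling is elided:
 *
 *        rte_eth_dev_start(port_id);
 *        rte_eth_dev_rx_queue_start(port_id, 0);
 *        ...
 *        rte_eth_dev_rx_queue_stop(port_id, 0);
 */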

static int
eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_tx_queues;
        void **txq;
        unsigned i;

        if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
                                                   sizeof(dev->data->tx_queues[0]) * nb_queues,
                                                   RTE_CACHE_LINE_SIZE);
                if (dev->data->tx_queues == NULL) {
                        dev->data->nb_tx_queues = 0;
                        return -(ENOMEM);
                }
        } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
                for (i = nb_queues; i < old_nb_queues; i++)
                        eth_dev_txq_release(dev, i);

                txq = dev->data->tx_queues;
                txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
                                  RTE_CACHE_LINE_SIZE);
                if (txq == NULL)
                        return -ENOMEM;
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(txq + old_nb_queues, 0,
                               sizeof(txq[0]) * new_qs);
                }

                dev->data->tx_queues = txq;

        } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
                for (i = nb_queues; i < old_nb_queues; i++)
                        eth_dev_txq_release(dev, i);

                rte_free(dev->data->tx_queues);
                dev->data->tx_queues = NULL;
        }
        dev->data->nb_tx_queues = nb_queues;
        return 0;
}

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
        switch (speed) {
        case ETH_SPEED_NUM_10M:
                return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
        case ETH_SPEED_NUM_100M:
                return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
        case ETH_SPEED_NUM_1G:
                return ETH_LINK_SPEED_1G;
        case ETH_SPEED_NUM_2_5G:
                return ETH_LINK_SPEED_2_5G;
        case ETH_SPEED_NUM_5G:
                return ETH_LINK_SPEED_5G;
        case ETH_SPEED_NUM_10G:
                return ETH_LINK_SPEED_10G;
        case ETH_SPEED_NUM_20G:
                return ETH_LINK_SPEED_20G;
        case ETH_SPEED_NUM_25G:
                return ETH_LINK_SPEED_25G;
        case ETH_SPEED_NUM_40G:
                return ETH_LINK_SPEED_40G;
        case ETH_SPEED_NUM_50G:
                return ETH_LINK_SPEED_50G;
        case ETH_SPEED_NUM_56G:
                return ETH_LINK_SPEED_56G;
        case ETH_SPEED_NUM_100G:
                return ETH_LINK_SPEED_100G;
        case ETH_SPEED_NUM_200G:
                return ETH_LINK_SPEED_200G;
        default:
                return 0;
        }
}
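
/*
 * rte_eth_speed_bitflag() converts a numeric speed into the bitmap form
 * used by the link_speeds field of struct rte_eth_conf. Illustrative
 * sketch for forcing a fixed full-duplex 10G link (ETH_LINK_SPEED_FIXED
 * and ETH_LINK_FULL_DUPLEX are assumed from rte_ethdev.h):
 *
 *        struct rte_eth_conf conf = { 0 };
 *
 *        conf.link_speeds = ETH_LINK_SPEED_FIXED |
 *                rte_eth_speed_bitflag(ETH_SPEED_NUM_10G,
 *                                      ETH_LINK_FULL_DUPLEX);
 */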

const char *
rte_eth_dev_rx_offload_name(uint64_t offload)
{
        const char *name = "UNKNOWN";
        unsigned int i;

        for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i) {
                if (offload == eth_dev_rx_offload_names[i].offload) {
                        name = eth_dev_rx_offload_names[i].name;
                        break;
                }
        }

        return name;
}

const char *
rte_eth_dev_tx_offload_name(uint64_t offload)
{
        const char *name = "UNKNOWN";
        unsigned int i;

        for (i = 0; i < RTE_DIM(eth_dev_tx_offload_names); ++i) {
                if (offload == eth_dev_tx_offload_names[i].offload) {
                        name = eth_dev_tx_offload_names[i].name;
                        break;
                }
        }

        return name;
}

static inline int
eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
                   uint32_t max_rx_pkt_len, uint32_t dev_info_size)
{
        int ret = 0;

        if (dev_info_size == 0) {
                if (config_size != max_rx_pkt_len) {
                        RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
                                       " %u != %u is not allowed\n",
                                       port_id, config_size, max_rx_pkt_len);
                        ret = -EINVAL;
                }
        } else if (config_size > dev_info_size) {
                RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
                               "> max allowed value %u\n", port_id, config_size,
                               dev_info_size);
                ret = -EINVAL;
        } else if (config_size < RTE_ETHER_MIN_LEN) {
                RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
                               "< min allowed value %u\n", port_id, config_size,
                               (unsigned int)RTE_ETHER_MIN_LEN);
                ret = -EINVAL;
        }
        return ret;
}

/*
 * Validate offloads that are requested through rte_eth_dev_configure against
 * the offloads successfully set by the ethernet device.
 *
 * @param port_id
 *   The port identifier of the Ethernet device.
 * @param req_offloads
 *   The offloads that have been requested through `rte_eth_dev_configure`.
 * @param set_offloads
 *   The offloads successfully set by the ethernet device.
 * @param offload_type
 *   The offload type i.e. Rx/Tx string.
 * @param offload_name
 *   The function that prints the offload name.
 * @return
 *   - (0) if validation successful.
 *   - (-EINVAL) if requested offload has been silently disabled.
 *
 */
static int
eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads,
                  uint64_t set_offloads, const char *offload_type,
                  const char *(*offload_name)(uint64_t))
{
        uint64_t offloads_diff = req_offloads ^ set_offloads;
        uint64_t offload;
        int ret = 0;

        while (offloads_diff != 0) {
                /* Check if any offload is requested but not enabled. */
                offload = 1ULL << __builtin_ctzll(offloads_diff);
                if (offload & req_offloads) {
                        RTE_ETHDEV_LOG(ERR,
                                "Port %u failed to enable %s offload %s\n",
                                port_id, offload_type, offload_name(offload));
                        ret = -EINVAL;
                }

                /* Check if offload couldn't be disabled. */
                if (offload & set_offloads) {
                        RTE_ETHDEV_LOG(DEBUG,
                                "Port %u %s offload %s is not requested but enabled\n",
                                port_id, offload_type, offload_name(offload));
                }

                offloads_diff &= ~offload;
        }

        return ret;
}
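
/*
 * Worked example of the XOR diff above: with req_offloads = 0x6 and
 * set_offloads = 0x5, offloads_diff = 0x3. Bit 1 (0x2) is set in
 * req_offloads only, so that offload was requested but not enabled
 * (error, -EINVAL); bit 0 (0x1) is set in set_offloads only, so it was
 * enabled without being requested (debug log only).
 */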

int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                      const struct rte_eth_conf *dev_conf)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        struct rte_eth_conf orig_conf;
        uint16_t overhead_len;
        int diag;
        int ret;
        uint16_t old_mtu;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        if (dev_conf == NULL) {
                RTE_ETHDEV_LOG(ERR,
                        "Cannot configure ethdev port %u from NULL config\n",
                        port_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

        if (dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be stopped to allow configuration\n",
                        port_id);
                return -EBUSY;
        }

        /*
         * Ensure that "dev_configured" is always 0 each time we prepare to
         * call dev_configure(), to avoid unanticipated behaviour.
         * It is set to 1 when dev_configure() completes successfully.
         */
        dev->data->dev_configured = 0;

        /* Store original config, as rollback required on failure */
        memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));

        /*
         * Copy the dev_conf parameter into the dev structure.
         * rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get
         */
        if (dev_conf != &dev->data->dev_conf)
                memcpy(&dev->data->dev_conf, dev_conf,
                       sizeof(dev->data->dev_conf));

        /* Backup mtu for rollback */
        old_mtu = dev->data->mtu;

        ret = rte_eth_dev_info_get(port_id, &dev_info);
        if (ret != 0)
                goto rollback;

        /* Get the real Ethernet overhead length */
        if (dev_info.max_mtu != UINT16_MAX &&
            dev_info.max_rx_pktlen > dev_info.max_mtu)
                overhead_len = dev_info.max_rx_pktlen - dev_info.max_mtu;
        else
                overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

        /* If the number of queues specified by the application for both Rx
         * and Tx is zero, use driver preferred values. This cannot be done
         * individually as it is valid for either Tx or Rx (but not both) to
         * be zero. If the driver does not provide any preferred values,
         * fall back on EAL defaults.
         */
1410         if (nb_rx_q == 0 && nb_tx_q == 0) {
1411                 nb_rx_q = dev_info.default_rxportconf.nb_queues;
1412                 if (nb_rx_q == 0)
1413                         nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
1414                 nb_tx_q = dev_info.default_txportconf.nb_queues;
1415                 if (nb_tx_q == 0)
1416                         nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
1417         }
1418
1419         if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
1420                 RTE_ETHDEV_LOG(ERR,
1421                         "Number of RX queues requested (%u) is greater than max supported(%d)\n",
1422                         nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
1423                 ret = -EINVAL;
1424                 goto rollback;
1425         }
1426
1427         if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
1428                 RTE_ETHDEV_LOG(ERR,
1429                         "Number of TX queues requested (%u) is greater than max supported(%d)\n",
1430                         nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
1431                 ret = -EINVAL;
1432                 goto rollback;
1433         }
1434
1435         /*
1436          * Check that the numbers of RX and TX queues are not greater
1437          * than the maximum number of RX and TX queues supported by the
1438          * configured device.
1439          */
1440         if (nb_rx_q > dev_info.max_rx_queues) {
1441                 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
1442                         port_id, nb_rx_q, dev_info.max_rx_queues);
1443                 ret = -EINVAL;
1444                 goto rollback;
1445         }
1446
1447         if (nb_tx_q > dev_info.max_tx_queues) {
1448                 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
1449                         port_id, nb_tx_q, dev_info.max_tx_queues);
1450                 ret = -EINVAL;
1451                 goto rollback;
1452         }
1453
1454         /* Check that the device supports requested interrupts */
1455         if ((dev_conf->intr_conf.lsc == 1) &&
1456                         (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
1457                 RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
1458                         dev->device->driver->name);
1459                 ret = -EINVAL;
1460                 goto rollback;
1461         }
1462         if ((dev_conf->intr_conf.rmv == 1) &&
1463                         (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
1464                 RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
1465                         dev->device->driver->name);
1466                 ret = -EINVAL;
1467                 goto rollback;
1468         }
1469
1470         /*
1471          * If jumbo frames are enabled, check that the maximum RX packet
1472          * length is supported by the configured device.
1473          */
1474         if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
1475                 if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) {
1476                         RTE_ETHDEV_LOG(ERR,
1477                                 "Ethdev port_id=%u max_rx_pkt_len %u > max valid value %u\n",
1478                                 port_id, dev_conf->rxmode.max_rx_pkt_len,
1479                                 dev_info.max_rx_pktlen);
1480                         ret = -EINVAL;
1481                         goto rollback;
1482                 } else if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN) {
1483                         RTE_ETHDEV_LOG(ERR,
1484                                 "Ethdev port_id=%u max_rx_pkt_len %u < min valid value %u\n",
1485                                 port_id, dev_conf->rxmode.max_rx_pkt_len,
1486                                 (unsigned int)RTE_ETHER_MIN_LEN);
1487                         ret = -EINVAL;
1488                         goto rollback;
1489                 }
1490
1491                 /* Scale the MTU size to adapt max_rx_pkt_len */
1492                 dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
1493                                 overhead_len;
1494         } else {
1495                 uint16_t pktlen = dev_conf->rxmode.max_rx_pkt_len;
1496                 if (pktlen < RTE_ETHER_MIN_MTU + overhead_len ||
1497                     pktlen > RTE_ETHER_MTU + overhead_len)
1498                         /* Use default value */
1499                         dev->data->dev_conf.rxmode.max_rx_pkt_len =
1500                                                 RTE_ETHER_MTU + overhead_len;
1501         }
1502
1503         /*
1504          * If LRO is enabled, check that the maximum aggregated packet
1505          * size is supported by the configured device.
1506          */
1507         if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
1508                 if (dev_conf->rxmode.max_lro_pkt_size == 0)
1509                         dev->data->dev_conf.rxmode.max_lro_pkt_size =
1510                                 dev->data->dev_conf.rxmode.max_rx_pkt_len;
1511                 ret = eth_dev_check_lro_pkt_size(port_id,
1512                                 dev->data->dev_conf.rxmode.max_lro_pkt_size,
1513                                 dev->data->dev_conf.rxmode.max_rx_pkt_len,
1514                                 dev_info.max_lro_pkt_size);
1515                 if (ret != 0)
1516                         goto rollback;
1517         }
1518
1519         /* Any requested offload must be within the device's capabilities */
1520         if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
1521              dev_conf->rxmode.offloads) {
1522                 RTE_ETHDEV_LOG(ERR,
1523                         "Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" must be within Rx offload "
1524                         "capabilities 0x%"PRIx64" in %s()\n",
1525                         port_id, dev_conf->rxmode.offloads,
1526                         dev_info.rx_offload_capa,
1527                         __func__);
1528                 ret = -EINVAL;
1529                 goto rollback;
1530         }
1531         if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
1532              dev_conf->txmode.offloads) {
1533                 RTE_ETHDEV_LOG(ERR,
1534                         "Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" must be within Tx offload "
1535                         "capabilities 0x%"PRIx64" in %s()\n",
1536                         port_id, dev_conf->txmode.offloads,
1537                         dev_info.tx_offload_capa,
1538                         __func__);
1539                 ret = -EINVAL;
1540                 goto rollback;
1541         }
1542
1543         dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
1544                 rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);
1545
1546         /* Check that the device supports the requested RSS hash functions. */
1547         if ((dev_info.flow_type_rss_offloads |
1548              dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
1549             dev_info.flow_type_rss_offloads) {
1550                 RTE_ETHDEV_LOG(ERR,
1551                         "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
1552                         port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
1553                         dev_info.flow_type_rss_offloads);
1554                 ret = -EINVAL;
1555                 goto rollback;
1556         }
1557
1558         /* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
1559         if (((dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) == 0) &&
1560             (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
1561                 RTE_ETHDEV_LOG(ERR,
1562                         "Ethdev port_id=%u: %s offload requested but Rx mq_mode does not enable RSS\n",
1563                         port_id,
1564                         rte_eth_dev_rx_offload_name(DEV_RX_OFFLOAD_RSS_HASH));
1565                 ret = -EINVAL;
1566                 goto rollback;
1567         }
1568
1569         /*
1570          * Setup new number of RX/TX queues and reconfigure device.
1571          */
1572         diag = eth_dev_rx_queue_config(dev, nb_rx_q);
1573         if (diag != 0) {
1574                 RTE_ETHDEV_LOG(ERR,
1575                         "Port%u eth_dev_rx_queue_config = %d\n",
1576                         port_id, diag);
1577                 ret = diag;
1578                 goto rollback;
1579         }
1580
1581         diag = eth_dev_tx_queue_config(dev, nb_tx_q);
1582         if (diag != 0) {
1583                 RTE_ETHDEV_LOG(ERR,
1584                         "Port%u eth_dev_tx_queue_config = %d\n",
1585                         port_id, diag);
1586                 eth_dev_rx_queue_config(dev, 0);
1587                 ret = diag;
1588                 goto rollback;
1589         }
1590
1591         diag = (*dev->dev_ops->dev_configure)(dev);
1592         if (diag != 0) {
1593                 RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
1594                         port_id, diag);
1595                 ret = eth_err(port_id, diag);
1596                 goto reset_queues;
1597         }
1598
1599         /* Initialize Rx profiling if enabled at compilation time. */
1600         diag = __rte_eth_dev_profile_init(port_id, dev);
1601         if (diag != 0) {
1602                 RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
1603                         port_id, diag);
1604                 ret = eth_err(port_id, diag);
1605                 goto reset_queues;
1606         }
1607
1608         /* Validate Rx offloads. */
1609         diag = eth_dev_validate_offloads(port_id,
1610                         dev_conf->rxmode.offloads,
1611                         dev->data->dev_conf.rxmode.offloads, "Rx",
1612                         rte_eth_dev_rx_offload_name);
1613         if (diag != 0) {
1614                 ret = diag;
1615                 goto reset_queues;
1616         }
1617
1618         /* Validate Tx offloads. */
1619         diag = eth_dev_validate_offloads(port_id,
1620                         dev_conf->txmode.offloads,
1621                         dev->data->dev_conf.txmode.offloads, "Tx",
1622                         rte_eth_dev_tx_offload_name);
1623         if (diag != 0) {
1624                 ret = diag;
1625                 goto reset_queues;
1626         }
1627
1628         dev->data->dev_configured = 1;
1629         rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0);
1630         return 0;
1631 reset_queues:
1632         eth_dev_rx_queue_config(dev, 0);
1633         eth_dev_tx_queue_config(dev, 0);
1634 rollback:
1635         memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
1636         if (old_mtu != dev->data->mtu)
1637                 dev->data->mtu = old_mtu;
1638
1639         rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret);
1640         return ret;
1641 }
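
/*
 * Editor's illustrative sketch (not part of the library): a minimal
 * application-side configure call exercising the validation and
 * rollback path above. The port number, queue counts and RSS choice
 * are assumptions for illustration only.
 *
 *	struct rte_eth_conf port_conf = {
 *		.rxmode = {
 *			.mq_mode = ETH_MQ_RX_RSS,
 *			.offloads = DEV_RX_OFFLOAD_RSS_HASH,
 *		},
 *		.rx_adv_conf.rss_conf = { .rss_hf = ETH_RSS_IP },
 *	};
 *
 *	// 1 Rx + 1 Tx queue; on any check failure above, the previous
 *	// dev_conf and MTU are restored and a negative errno returned.
 *	int ret = rte_eth_dev_configure(0, 1, 1, &port_conf);
 *	if (ret < 0)
 *		rte_exit(EXIT_FAILURE, "configure: %s\n", rte_strerror(-ret));
 */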
1642
1643 void
1644 rte_eth_dev_internal_reset(struct rte_eth_dev *dev)
1645 {
1646         if (dev->data->dev_started) {
1647                 RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
1648                         dev->data->port_id);
1649                 return;
1650         }
1651
1652         eth_dev_rx_queue_config(dev, 0);
1653         eth_dev_tx_queue_config(dev, 0);
1654
1655         memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
1656 }
1657
1658 static void
1659 eth_dev_mac_restore(struct rte_eth_dev *dev,
1660                         struct rte_eth_dev_info *dev_info)
1661 {
1662         struct rte_ether_addr *addr;
1663         uint16_t i;
1664         uint32_t pool = 0;
1665         uint64_t pool_mask;
1666
1667         /* replay MAC address configuration including default MAC */
1668         addr = &dev->data->mac_addrs[0];
1669         if (*dev->dev_ops->mac_addr_set != NULL)
1670                 (*dev->dev_ops->mac_addr_set)(dev, addr);
1671         else if (*dev->dev_ops->mac_addr_add != NULL)
1672                 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);
1673
1674         if (*dev->dev_ops->mac_addr_add != NULL) {
1675                 for (i = 1; i < dev_info->max_mac_addrs; i++) {
1676                         addr = &dev->data->mac_addrs[i];
1677
1678                         /* skip zero address */
1679                         if (rte_is_zero_ether_addr(addr))
1680                                 continue;
1681
1682                         pool = 0;
1683                         pool_mask = dev->data->mac_pool_sel[i];
1684
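                        /*
                         * mac_pool_sel[i] is the pool bitmap for address i:
                         * bit n set means the address must be re-added to
                         * pool n. Walk the mask and replay each add.
                         */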
1685                         do {
1686                                 if (pool_mask & 1ULL)
1687                                         (*dev->dev_ops->mac_addr_add)(dev,
1688                                                 addr, i, pool);
1689                                 pool_mask >>= 1;
1690                                 pool++;
1691                         } while (pool_mask);
1692                 }
1693         }
1694 }
1695
1696 static int
1697 eth_dev_config_restore(struct rte_eth_dev *dev,
1698                 struct rte_eth_dev_info *dev_info, uint16_t port_id)
1699 {
1700         int ret;
1701
1702         if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
1703                 eth_dev_mac_restore(dev, dev_info);
1704
1705         /* replay promiscuous configuration */
1706         /*
1707          * Use the driver callbacks directly: port_id is already
1708          * validated and we must bypass the same-value short-circuit.
1709          */
1710         if (rte_eth_promiscuous_get(port_id) == 1 &&
1711             *dev->dev_ops->promiscuous_enable != NULL) {
1712                 ret = eth_err(port_id,
1713                               (*dev->dev_ops->promiscuous_enable)(dev));
1714                 if (ret != 0 && ret != -ENOTSUP) {
1715                         RTE_ETHDEV_LOG(ERR,
1716                                 "Failed to enable promiscuous mode for device (port %u): %s\n",
1717                                 port_id, rte_strerror(-ret));
1718                         return ret;
1719                 }
1720         } else if (rte_eth_promiscuous_get(port_id) == 0 &&
1721                    *dev->dev_ops->promiscuous_disable != NULL) {
1722                 ret = eth_err(port_id,
1723                               (*dev->dev_ops->promiscuous_disable)(dev));
1724                 if (ret != 0 && ret != -ENOTSUP) {
1725                         RTE_ETHDEV_LOG(ERR,
1726                                 "Failed to disable promiscuous mode for device (port %u): %s\n",
1727                                 port_id, rte_strerror(-ret));
1728                         return ret;
1729                 }
1730         }
1731
1732         /* replay all multicast configuration */
1733         /*
1734          * Use the driver callbacks directly: port_id is already
1735          * validated and we must bypass the same-value short-circuit.
1736          */
1737         if (rte_eth_allmulticast_get(port_id) == 1 &&
1738             *dev->dev_ops->allmulticast_enable != NULL) {
1739                 ret = eth_err(port_id,
1740                               (*dev->dev_ops->allmulticast_enable)(dev));
1741                 if (ret != 0 && ret != -ENOTSUP) {
1742                         RTE_ETHDEV_LOG(ERR,
1743                                 "Failed to enable allmulticast mode for device (port %u): %s\n",
1744                                 port_id, rte_strerror(-ret));
1745                         return ret;
1746                 }
1747         } else if (rte_eth_allmulticast_get(port_id) == 0 &&
1748                    *dev->dev_ops->allmulticast_disable != NULL) {
1749                 ret = eth_err(port_id,
1750                               (*dev->dev_ops->allmulticast_disable)(dev));
1751                 if (ret != 0 && ret != -ENOTSUP) {
1752                         RTE_ETHDEV_LOG(ERR,
1753                                 "Failed to disable allmulticast mode for device (port %u): %s\n",
1754                                 port_id, rte_strerror(-ret));
1755                         return ret;
1756                 }
1757         }
1758
1759         return 0;
1760 }
1761
1762 int
1763 rte_eth_dev_start(uint16_t port_id)
1764 {
1765         struct rte_eth_dev *dev;
1766         struct rte_eth_dev_info dev_info;
1767         int diag;
1768         int ret, ret_stop;
1769
1770         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1771         dev = &rte_eth_devices[port_id];
1772
1773         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1774
1775         if (dev->data->dev_configured == 0) {
1776                 RTE_ETHDEV_LOG(INFO,
1777                         "Device with port_id=%"PRIu16" is not configured.\n",
1778                         port_id);
1779                 return -EINVAL;
1780         }
1781
1782         if (dev->data->dev_started != 0) {
1783                 RTE_ETHDEV_LOG(INFO,
1784                         "Device with port_id=%"PRIu16" already started\n",
1785                         port_id);
1786                 return 0;
1787         }
1788
1789         ret = rte_eth_dev_info_get(port_id, &dev_info);
1790         if (ret != 0)
1791                 return ret;
1792
1793         /* Restore the MAC now if the device does not support live change */
1794         if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
1795                 eth_dev_mac_restore(dev, &dev_info);
1796
1797         diag = (*dev->dev_ops->dev_start)(dev);
1798         if (diag == 0)
1799                 dev->data->dev_started = 1;
1800         else
1801                 return eth_err(port_id, diag);
1802
1803         ret = eth_dev_config_restore(dev, &dev_info, port_id);
1804         if (ret != 0) {
1805                 RTE_ETHDEV_LOG(ERR,
1806                         "Error during restoring configuration for device (port %u): %s\n",
1807                         port_id, rte_strerror(-ret));
1808                 ret_stop = rte_eth_dev_stop(port_id);
1809                 if (ret_stop != 0) {
1810                         RTE_ETHDEV_LOG(ERR,
1811                                 "Failed to stop device (port %u): %s\n",
1812                                 port_id, rte_strerror(-ret_stop));
1813                 }
1814
1815                 return ret;
1816         }
1817
1818         if (dev->data->dev_conf.intr_conf.lsc == 0) {
1819                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
1820                 (*dev->dev_ops->link_update)(dev, 0);
1821         }
1822
1823         rte_ethdev_trace_start(port_id);
1824         return 0;
1825 }
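
/*
 * Editor's illustrative start-up order (a sketch, not library code):
 * the restore logic above assumes configure and queue setup have
 * already succeeded. `port_id', `port_conf' and `mb_pool' are assumed
 * to exist in the application.
 *
 *	rte_eth_dev_configure(port_id, 1, 1, &port_conf);
 *	rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *			       NULL, mb_pool);
 *	rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL);
 *	if (rte_eth_dev_start(port_id) < 0)
 *		rte_panic("cannot start port %u\n", port_id);
 */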
1826
1827 int
1828 rte_eth_dev_stop(uint16_t port_id)
1829 {
1830         struct rte_eth_dev *dev;
1831         int ret;
1832
1833         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1834         dev = &rte_eth_devices[port_id];
1835
1836         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_stop, -ENOTSUP);
1837
1838         if (dev->data->dev_started == 0) {
1839                 RTE_ETHDEV_LOG(INFO,
1840                         "Device with port_id=%"PRIu16" already stopped\n",
1841                         port_id);
1842                 return 0;
1843         }
1844
1845         dev->data->dev_started = 0;
1846         ret = (*dev->dev_ops->dev_stop)(dev);
1847         rte_ethdev_trace_stop(port_id, ret);
1848
1849         return ret;
1850 }
1851
1852 int
1853 rte_eth_dev_set_link_up(uint16_t port_id)
1854 {
1855         struct rte_eth_dev *dev;
1856
1857         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1858         dev = &rte_eth_devices[port_id];
1859
1860         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1861         return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
1862 }
1863
1864 int
1865 rte_eth_dev_set_link_down(uint16_t port_id)
1866 {
1867         struct rte_eth_dev *dev;
1868
1869         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1870         dev = &rte_eth_devices[port_id];
1871
1872         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1873         return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
1874 }
1875
1876 int
1877 rte_eth_dev_close(uint16_t port_id)
1878 {
1879         struct rte_eth_dev *dev;
1880         int firsterr, binerr;
1881         int *lasterr = &firsterr;
1882
1883         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1884         dev = &rte_eth_devices[port_id];
1885
1886         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
1887         *lasterr = (*dev->dev_ops->dev_close)(dev);
1888         if (*lasterr != 0)
1889                 lasterr = &binerr;
1890
1891         rte_ethdev_trace_close(port_id);
1892         *lasterr = rte_eth_dev_release_port(dev);
1893
1894         return firsterr;
1895 }
1896
1897 int
1898 rte_eth_dev_reset(uint16_t port_id)
1899 {
1900         struct rte_eth_dev *dev;
1901         int ret;
1902
1903         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1904         dev = &rte_eth_devices[port_id];
1905
1906         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
1907
1908         ret = rte_eth_dev_stop(port_id);
1909         if (ret != 0) {
1910                 RTE_ETHDEV_LOG(ERR,
1911                         "Failed to stop device (port %u) before reset: %s - ignore\n",
1912                         port_id, rte_strerror(-ret));
1913         }
1914         ret = dev->dev_ops->dev_reset(dev);
1915
1916         return eth_err(port_id, ret);
1917 }
1918
1919 int
1920 rte_eth_dev_is_removed(uint16_t port_id)
1921 {
1922         struct rte_eth_dev *dev;
1923         int ret;
1924
1925         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
1926         dev = &rte_eth_devices[port_id];
1927
1928         if (dev->state == RTE_ETH_DEV_REMOVED)
1929                 return 1;
1930
1931         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);
1932
1933         ret = dev->dev_ops->is_removed(dev);
1934         if (ret != 0)
1935                 /* Device is physically removed. */
1936                 dev->state = RTE_ETH_DEV_REMOVED;
1937
1938         return ret;
1939 }
1940
1941 static int
1942 rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg,
1943                              uint16_t n_seg, uint32_t *mbp_buf_size,
1944                              const struct rte_eth_dev_info *dev_info)
1945 {
1946         const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa;
1947         struct rte_mempool *mp_first;
1948         uint32_t offset_mask;
1949         uint16_t seg_idx;
1950
1951         if (n_seg > seg_capa->max_nseg) {
1952                 RTE_ETHDEV_LOG(ERR,
1953                                "Requested Rx segments %u exceed supported %u\n",
1954                                n_seg, seg_capa->max_nseg);
1955                 return -EINVAL;
1956         }
1957         /*
1958          * Check the sizes and offsets against buffer sizes
1959          * for each segment specified in extended configuration.
1960          */
1961         mp_first = rx_seg[0].mp;
1962         offset_mask = (1u << seg_capa->offset_align_log2) - 1;
1963         for (seg_idx = 0; seg_idx < n_seg; seg_idx++) {
1964                 struct rte_mempool *mpl = rx_seg[seg_idx].mp;
1965                 uint32_t length = rx_seg[seg_idx].length;
1966                 uint32_t offset = rx_seg[seg_idx].offset;
1967
1968                 if (mpl == NULL) {
1969                         RTE_ETHDEV_LOG(ERR, "null mempool pointer\n");
1970                         return -EINVAL;
1971                 }
1972                 if (seg_idx != 0 && mp_first != mpl &&
1973                     seg_capa->multi_pools == 0) {
1974                         RTE_ETHDEV_LOG(ERR, "Receiving to multiple pools is not supported\n");
1975                         return -ENOTSUP;
1976                 }
1977                 if (offset != 0) {
1978                         if (seg_capa->offset_allowed == 0) {
1979                                 RTE_ETHDEV_LOG(ERR, "Rx segmentation with offset is not supported\n");
1980                                 return -ENOTSUP;
1981                         }
1982                         if (offset & offset_mask) {
1983                                 RTE_ETHDEV_LOG(ERR, "Rx segmentation invalid offset alignment %u, %u\n",
1984                                                offset,
1985                                                seg_capa->offset_align_log2);
1986                                 return -EINVAL;
1987                         }
1988                 }
1989                 if (mpl->private_data_size <
1990                         sizeof(struct rte_pktmbuf_pool_private)) {
1991                         RTE_ETHDEV_LOG(ERR,
1992                                        "%s private_data_size %u < %u\n",
1993                                        mpl->name, mpl->private_data_size,
1994                                        (unsigned int)sizeof
1995                                         (struct rte_pktmbuf_pool_private));
1996                         return -ENOSPC;
1997                 }
1998                 offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM;
1999                 *mbp_buf_size = rte_pktmbuf_data_room_size(mpl);
2000                 length = length != 0 ? length : *mbp_buf_size;
2001                 if (*mbp_buf_size < length + offset) {
2002                         RTE_ETHDEV_LOG(ERR,
2003                                        "%s mbuf_data_room_size %u < %u (segment length=%u + segment offset=%u)\n",
2004                                        mpl->name, *mbp_buf_size,
2005                                        length + offset, length, offset);
2006                         return -EINVAL;
2007                 }
2008         }
2009         return 0;
2010 }
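
/*
 * Editor's illustrative sketch of a split configuration accepted by
 * the checks above. Pool names and segment sizes are assumptions.
 *
 *	struct rte_eth_rxseg_split segs[2] = {
 *		{ .mp = hdr_pool, .length = 128, .offset = 0 },
 *		{ .mp = pay_pool, .length = 0, .offset = 0 }, // 0: pool default
 *	};
 *	struct rte_eth_rxconf rxconf = dev_info.default_rxconf;
 *	rxconf.rx_seg = (union rte_eth_rxseg *)segs;
 *	rxconf.rx_nseg = RTE_DIM(segs);
 *	rxconf.offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
 *	// mp == NULL selects the multi-segment path in the setup below
 *	rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *			       &rxconf, NULL);
 */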
2011
2012 int
2013 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2014                        uint16_t nb_rx_desc, unsigned int socket_id,
2015                        const struct rte_eth_rxconf *rx_conf,
2016                        struct rte_mempool *mp)
2017 {
2018         int ret;
2019         uint32_t mbp_buf_size;
2020         struct rte_eth_dev *dev;
2021         struct rte_eth_dev_info dev_info;
2022         struct rte_eth_rxconf local_conf;
2023
2024         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2025         dev = &rte_eth_devices[port_id];
2026
2027         if (rx_queue_id >= dev->data->nb_rx_queues) {
2028                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
2029                 return -EINVAL;
2030         }
2031
2032         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
2033
2034         ret = rte_eth_dev_info_get(port_id, &dev_info);
2035         if (ret != 0)
2036                 return ret;
2037
2038         if (mp != NULL) {
2039                 /* Single pool configuration check. */
2040                 if (rx_conf != NULL && rx_conf->rx_nseg != 0) {
2041                         RTE_ETHDEV_LOG(ERR,
2042                                        "Ambiguous segment configuration\n");
2043                         return -EINVAL;
2044                 }
2045                 /*
2046                  * Check the size of the mbuf data buffer; this value
2047                  * must be provided in the private data of the memory pool.
2048                  * First check that the memory pool has valid private data.
2049                  */
2050                 if (mp->private_data_size <
2051                                 sizeof(struct rte_pktmbuf_pool_private)) {
2052                         RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n",
2053                                 mp->name, mp->private_data_size,
2054                                 (unsigned int)
2055                                 sizeof(struct rte_pktmbuf_pool_private));
2056                         return -ENOSPC;
2057                 }
2058                 mbp_buf_size = rte_pktmbuf_data_room_size(mp);
2059                 if (mbp_buf_size < dev_info.min_rx_bufsize +
2060                                    RTE_PKTMBUF_HEADROOM) {
2061                         RTE_ETHDEV_LOG(ERR,
2062                                        "%s mbuf_data_room_size %u < %u (RTE_PKTMBUF_HEADROOM=%u + min_rx_bufsize(dev)=%u)\n",
2063                                        mp->name, mbp_buf_size,
2064                                        RTE_PKTMBUF_HEADROOM +
2065                                        dev_info.min_rx_bufsize,
2066                                        RTE_PKTMBUF_HEADROOM,
2067                                        dev_info.min_rx_bufsize);
2068                         return -EINVAL;
2069                 }
2070         } else {
2071                 const struct rte_eth_rxseg_split *rx_seg;
2072                 uint16_t n_seg;
2073
2074                 /* Extended multi-segment configuration check. */
2075                 if (rx_conf == NULL || rx_conf->rx_seg == NULL || rx_conf->rx_nseg == 0) {
2076                         RTE_ETHDEV_LOG(ERR,
2077                                        "Memory pool is null and no extended configuration provided\n");
2078                         return -EINVAL;
2079                 }
2080
2081                 rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg;
2082                 n_seg = rx_conf->rx_nseg;
2083
2084                 if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
2085                         ret = rte_eth_rx_queue_check_split(rx_seg, n_seg,
2086                                                            &mbp_buf_size,
2087                                                            &dev_info);
2088                         if (ret != 0)
2089                                 return ret;
2090                 } else {
2091                         RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n");
2092                         return -EINVAL;
2093                 }
2094         }
2095
2096         /* Use the default specified by the driver if nb_rx_desc is zero */
2097         if (nb_rx_desc == 0) {
2098                 nb_rx_desc = dev_info.default_rxportconf.ring_size;
2099                 /* If driver default is also zero, fall back on EAL default */
2100                 if (nb_rx_desc == 0)
2101                         nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
2102         }
2103
2104         if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
2105                         nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
2106                         nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
2107
2108                 RTE_ETHDEV_LOG(ERR,
2109                         "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
2110                         nb_rx_desc, dev_info.rx_desc_lim.nb_max,
2111                         dev_info.rx_desc_lim.nb_min,
2112                         dev_info.rx_desc_lim.nb_align);
2113                 return -EINVAL;
2114         }
2115
2116         if (dev->data->dev_started &&
2117                 !(dev_info.dev_capa &
2118                         RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
2119                 return -EBUSY;
2120
2121         if (dev->data->dev_started &&
2122                 (dev->data->rx_queue_state[rx_queue_id] !=
2123                         RTE_ETH_QUEUE_STATE_STOPPED))
2124                 return -EBUSY;
2125
2126         eth_dev_rxq_release(dev, rx_queue_id);
2127
2128         if (rx_conf == NULL)
2129                 rx_conf = &dev_info.default_rxconf;
2130
2131         local_conf = *rx_conf;
2132
2133         /*
2134          * If an offload has already been enabled in
2135          * rte_eth_dev_configure(), it is enabled on all queues,
2136          * so there is no need to enable it on this queue again.
2137          * The local_conf.offloads passed to the underlying PMD only
2138          * carries those offloads enabled on this particular queue
2139          * and not on all queues.
2140          */
2141         local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
2142
2143         /*
2144          * Offloads newly added for this queue are those not enabled in
2145          * rte_eth_dev_configure(), and they must be per-queue offloads.
2146          * A pure per-port offload can't be enabled on a queue while
2147          * disabled on another queue, and it can't be newly added for
2148          * any queue unless it has already been enabled in
2149          * rte_eth_dev_configure().
2150          */
2151         if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
2152              local_conf.offloads) {
2153                 RTE_ETHDEV_LOG(ERR,
2154                         "Ethdev port_id=%d rx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
2155                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2156                         port_id, rx_queue_id, local_conf.offloads,
2157                         dev_info.rx_queue_offload_capa,
2158                         __func__);
2159                 return -EINVAL;
2160         }
2161
2162         /*
2163          * If LRO is enabled, check that the maximum aggregated packet
2164          * size is supported by the configured device.
2165          */
2166         if (local_conf.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
2167                 if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0)
2168                         dev->data->dev_conf.rxmode.max_lro_pkt_size =
2169                                 dev->data->dev_conf.rxmode.max_rx_pkt_len;
2170                 int ret = eth_dev_check_lro_pkt_size(port_id,
2171                                 dev->data->dev_conf.rxmode.max_lro_pkt_size,
2172                                 dev->data->dev_conf.rxmode.max_rx_pkt_len,
2173                                 dev_info.max_lro_pkt_size);
2174                 if (ret != 0)
2175                         return ret;
2176         }
2177
2178         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
2179                                               socket_id, &local_conf, mp);
2180         if (!ret) {
2181                 if (!dev->data->min_rx_buf_size ||
2182                     dev->data->min_rx_buf_size > mbp_buf_size)
2183                         dev->data->min_rx_buf_size = mbp_buf_size;
2184         }
2185
2186         rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp,
2187                 rx_conf, ret);
2188         return eth_err(port_id, ret);
2189 }
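
/*
 * Editor's illustrative single-pool setup matching the checks above
 * (pool sizing values are assumptions):
 *
 *	struct rte_mempool *mb_pool = rte_pktmbuf_pool_create("rx_pool",
 *		8192, 256, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *	// rx_conf == NULL selects dev_info.default_rxconf;
 *	// nb_rx_desc == 0 falls back to the driver, then the EAL, default.
 *	rte_eth_rx_queue_setup(port_id, 0, 0, rte_socket_id(),
 *			       NULL, mb_pool);
 */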
2190
2191 int
2192 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2193                                uint16_t nb_rx_desc,
2194                                const struct rte_eth_hairpin_conf *conf)
2195 {
2196         int ret;
2197         struct rte_eth_dev *dev;
2198         struct rte_eth_hairpin_cap cap;
2199         int i;
2200         int count;
2201
2202         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2203         dev = &rte_eth_devices[port_id];
2204
2205         if (rx_queue_id >= dev->data->nb_rx_queues) {
2206                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
2207                 return -EINVAL;
2208         }
2209
2210         if (conf == NULL) {
2211                 RTE_ETHDEV_LOG(ERR,
2212                         "Cannot setup ethdev port %u Rx hairpin queue from NULL config\n",
2213                         port_id);
2214                 return -EINVAL;
2215         }
2216
2217         ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2218         if (ret != 0)
2219                 return ret;
2220         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_hairpin_queue_setup,
2221                                 -ENOTSUP);
2222         /* If nb_rx_desc is zero, use the max number of descriptors from the driver. */
2223         if (nb_rx_desc == 0)
2224                 nb_rx_desc = cap.max_nb_desc;
2225         if (nb_rx_desc > cap.max_nb_desc) {
2226                 RTE_ETHDEV_LOG(ERR,
2227                         "Invalid value for nb_rx_desc(=%hu), should be: <= %hu\n",
2228                         nb_rx_desc, cap.max_nb_desc);
2229                 return -EINVAL;
2230         }
2231         if (conf->peer_count > cap.max_rx_2_tx) {
2232                 RTE_ETHDEV_LOG(ERR,
2233                         "Invalid value for number of peers for Rx queue(=%u), should be: <= %hu\n",
2234                         conf->peer_count, cap.max_rx_2_tx);
2235                 return -EINVAL;
2236         }
2237         if (conf->peer_count == 0) {
2238                 RTE_ETHDEV_LOG(ERR,
2239                         "Invalid value for number of peers for Rx queue(=%u), should be: > 0\n",
2240                         conf->peer_count);
2241                 return -EINVAL;
2242         }
2243         for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
2244              cap.max_nb_queues != UINT16_MAX; i++) {
2245                 if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
2246                         count++;
2247         }
2248         if (count > cap.max_nb_queues) {
2249                 RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues, max is %d\n",
2250                                cap.max_nb_queues);
2251                 return -EINVAL;
2252         }
2253         if (dev->data->dev_started)
2254                 return -EBUSY;
2255         eth_dev_rxq_release(dev, rx_queue_id);
2256         ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
2257                                                       nb_rx_desc, conf);
2258         if (ret == 0)
2259                 dev->data->rx_queue_state[rx_queue_id] =
2260                         RTE_ETH_QUEUE_STATE_HAIRPIN;
2261         return eth_err(port_id, ret);
2262 }
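
/*
 * Editor's illustrative hairpin configuration: one peer Tx queue on
 * the same port, as validated above. `peer_txq' and `rxq_id' are
 * assumptions for illustration.
 *
 *	struct rte_eth_hairpin_conf hp_conf = {
 *		.peer_count = 1,
 *		.peers[0] = { .port = port_id, .queue = peer_txq },
 *	};
 *	// nb_rx_desc == 0 selects cap.max_nb_desc
 *	rte_eth_rx_hairpin_queue_setup(port_id, rxq_id, 0, &hp_conf);
 */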
2263
2264 int
2265 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2266                        uint16_t nb_tx_desc, unsigned int socket_id,
2267                        const struct rte_eth_txconf *tx_conf)
2268 {
2269         struct rte_eth_dev *dev;
2270         struct rte_eth_dev_info dev_info;
2271         struct rte_eth_txconf local_conf;
2272         int ret;
2273
2274         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2275         dev = &rte_eth_devices[port_id];
2276
2277         if (tx_queue_id >= dev->data->nb_tx_queues) {
2278                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
2279                 return -EINVAL;
2280         }
2281
2282         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
2283
2284         ret = rte_eth_dev_info_get(port_id, &dev_info);
2285         if (ret != 0)
2286                 return ret;
2287
2288         /* Use the default specified by the driver if nb_tx_desc is zero */
2289         if (nb_tx_desc == 0) {
2290                 nb_tx_desc = dev_info.default_txportconf.ring_size;
2291                 /* If driver default is zero, fall back on EAL default */
2292                 if (nb_tx_desc == 0)
2293                         nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
2294         }
2295         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
2296             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
2297             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
2298                 RTE_ETHDEV_LOG(ERR,
2299                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
2300                         nb_tx_desc, dev_info.tx_desc_lim.nb_max,
2301                         dev_info.tx_desc_lim.nb_min,
2302                         dev_info.tx_desc_lim.nb_align);
2303                 return -EINVAL;
2304         }
2305
2306         if (dev->data->dev_started &&
2307                 !(dev_info.dev_capa &
2308                         RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
2309                 return -EBUSY;
2310
2311         if (dev->data->dev_started &&
2312                 (dev->data->tx_queue_state[tx_queue_id] !=
2313                         RTE_ETH_QUEUE_STATE_STOPPED))
2314                 return -EBUSY;
2315
2316         eth_dev_txq_release(dev, tx_queue_id);
2317
2318         if (tx_conf == NULL)
2319                 tx_conf = &dev_info.default_txconf;
2320
2321         local_conf = *tx_conf;
2322
2323         /*
2324          * If an offload has already been enabled in
2325          * rte_eth_dev_configure(), it is enabled on all queues,
2326          * so there is no need to enable it on this queue again.
2327          * The local_conf.offloads passed to the underlying PMD only
2328          * carries those offloads enabled on this particular queue
2329          * and not on all queues.
2330          */
2331         local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
2332
2333         /*
2334          * Offloads newly added for this queue are those not enabled in
2335          * rte_eth_dev_configure(), and they must be per-queue offloads.
2336          * A pure per-port offload can't be enabled on a queue while
2337          * disabled on another queue, and it can't be newly added for
2338          * any queue unless it has already been enabled in
2339          * rte_eth_dev_configure().
2340          */
2341         if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
2342              local_conf.offloads) {
2343                 RTE_ETHDEV_LOG(ERR,
2344                         "Ethdev port_id=%d tx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
2345                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2346                         port_id, tx_queue_id, local_conf.offloads,
2347                         dev_info.tx_queue_offload_capa,
2348                         __func__);
2349                 return -EINVAL;
2350         }
2351
2352         rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf);
2353         return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
2354                        tx_queue_id, nb_tx_desc, socket_id, &local_conf));
2355 }
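
/*
 * Editor's illustrative per-queue offload request: an offload not
 * enabled port-wide in rte_eth_dev_configure() may be requested here
 * only if it appears in tx_queue_offload_capa (assumed below).
 *
 *	struct rte_eth_txconf txconf = dev_info.default_txconf;
 *	txconf.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
 *	rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), &txconf);
 */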
2356
2357 int
2358 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2359                                uint16_t nb_tx_desc,
2360                                const struct rte_eth_hairpin_conf *conf)
2361 {
2362         struct rte_eth_dev *dev;
2363         struct rte_eth_hairpin_cap cap;
2364         int i;
2365         int count;
2366         int ret;
2367
2368         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2369         dev = &rte_eth_devices[port_id];
2370
2371         if (tx_queue_id >= dev->data->nb_tx_queues) {
2372                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
2373                 return -EINVAL;
2374         }
2375
2376         if (conf == NULL) {
2377                 RTE_ETHDEV_LOG(ERR,
2378                         "Cannot setup ethdev port %u Tx hairpin queue from NULL config\n",
2379                         port_id);
2380                 return -EINVAL;
2381         }
2382
2383         ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2384         if (ret != 0)
2385                 return ret;
2386         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup,
2387                                 -ENOTSUP);
2388         /* If nb_tx_desc is zero, use the max number of descriptors from the driver. */
2389         if (nb_tx_desc == 0)
2390                 nb_tx_desc = cap.max_nb_desc;
2391         if (nb_tx_desc > cap.max_nb_desc) {
2392                 RTE_ETHDEV_LOG(ERR,
2393                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu\n",
2394                         nb_tx_desc, cap.max_nb_desc);
2395                 return -EINVAL;
2396         }
2397         if (conf->peer_count > cap.max_tx_2_rx) {
2398                 RTE_ETHDEV_LOG(ERR,
2399                         "Invalid value for number of peers for Tx queue(=%u), should be: <= %hu\n",
2400                         conf->peer_count, cap.max_tx_2_rx);
2401                 return -EINVAL;
2402         }
2403         if (conf->peer_count == 0) {
2404                 RTE_ETHDEV_LOG(ERR,
2405                         "Invalid value for number of peers for Tx queue(=%u), should be: > 0\n",
2406                         conf->peer_count);
2407                 return -EINVAL;
2408         }
2409         for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
2410              cap.max_nb_queues != UINT16_MAX; i++) {
2411                 if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
2412                         count++;
2413         }
2414         if (count > cap.max_nb_queues) {
2415                 RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues, max is %d\n",
2416                                cap.max_nb_queues);
2417                 return -EINVAL;
2418         }
2419         if (dev->data->dev_started)
2420                 return -EBUSY;
2421         eth_dev_txq_release(dev, tx_queue_id);
2422         ret = (*dev->dev_ops->tx_hairpin_queue_setup)
2423                 (dev, tx_queue_id, nb_tx_desc, conf);
2424         if (ret == 0)
2425                 dev->data->tx_queue_state[tx_queue_id] =
2426                         RTE_ETH_QUEUE_STATE_HAIRPIN;
2427         return eth_err(port_id, ret);
2428 }
2429
2430 int
2431 rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
2432 {
2433         struct rte_eth_dev *dev;
2434         int ret;
2435
2436         RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2437         dev = &rte_eth_devices[tx_port];
2438
2439         if (dev->data->dev_started == 0) {
2440                 RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port);
2441                 return -EBUSY;
2442         }
2443
2444         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_bind, -ENOTSUP);
2445         ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port);
2446         if (ret != 0)
2447                 RTE_ETHDEV_LOG(ERR, "Failed to bind hairpin Tx %d"
2448                                " to Rx %d (%d - all ports)\n",
2449                                tx_port, rx_port, RTE_MAX_ETHPORTS);
2450
2451         return ret;
2452 }
2453
2454 int
2455 rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
2456 {
2457         struct rte_eth_dev *dev;
2458         int ret;
2459
2460         RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2461         dev = &rte_eth_devices[tx_port];
2462
2463         if (dev->data->dev_started == 0) {
2464                 RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port);
2465                 return -EBUSY;
2466         }
2467
2468         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_unbind, -ENOTSUP);
2469         ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port);
2470         if (ret != 0)
2471                 RTE_ETHDEV_LOG(ERR, "Failed to unbind hairpin Tx %d"
2472                                " from Rx %d (%d - all ports)\n",
2473                                tx_port, rx_port, RTE_MAX_ETHPORTS);
2474
2475         return ret;
2476 }
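
/*
 * Editor's illustrative manual-bind pairing: the Tx port must be
 * started first, and RTE_MAX_ETHPORTS stands for "all ports".
 *
 *	rte_eth_hairpin_bind(tx_port, rx_port);
 *	// ... hairpin traffic flows ...
 *	rte_eth_hairpin_unbind(tx_port, RTE_MAX_ETHPORTS);
 */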
2477
2478 int
2479 rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
2480                                size_t len, uint32_t direction)
2481 {
2482         struct rte_eth_dev *dev;
2483         int ret;
2484
2485         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2486         dev = &rte_eth_devices[port_id];
2487
2488         if (peer_ports == NULL) {
2489                 RTE_ETHDEV_LOG(ERR,
2490                         "Cannot get ethdev port %u hairpin peer ports to NULL\n",
2491                         port_id);
2492                 return -EINVAL;
2493         }
2494
2495         if (len == 0) {
2496                 RTE_ETHDEV_LOG(ERR,
2497                         "Cannot get ethdev port %u hairpin peer ports to array with zero size\n",
2498                         port_id);
2499                 return -EINVAL;
2500         }
2501
2502         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_get_peer_ports,
2503                                 -ENOTSUP);
2504
2505         ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports,
2506                                                       len, direction);
2507         if (ret < 0)
2508                 RTE_ETHDEV_LOG(ERR, "Failed to get port %d hairpin peer %s ports\n",
2509                                port_id, direction ? "Rx" : "Tx");
2510
2511         return ret;
2512 }
2513
2514 void
2515 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
2516                 void *userdata __rte_unused)
2517 {
2518         rte_pktmbuf_free_bulk(pkts, unsent);
2519 }
2520
2521 void
2522 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
2523                 void *userdata)
2524 {
2525         uint64_t *count = userdata;
2526
2527         rte_pktmbuf_free_bulk(pkts, unsent);
2528         *count += unsent;
2529 }
2530
2531 int
2532 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
2533                 buffer_tx_error_fn cbfn, void *userdata)
2534 {
2535         if (buffer == NULL) {
2536                 RTE_ETHDEV_LOG(ERR,
2537                         "Cannot set Tx buffer error callback to NULL buffer\n");
2538                 return -EINVAL;
2539         }
2540
2541         buffer->error_callback = cbfn;
2542         buffer->error_userdata = userdata;
2543         return 0;
2544 }
2545
2546 int
2547 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
2548 {
2549         int ret = 0;
2550
2551         if (buffer == NULL) {
2552                 RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL buffer\n");
2553                 return -EINVAL;
2554         }
2555
2556         buffer->size = size;
2557         if (buffer->error_callback == NULL) {
2558                 ret = rte_eth_tx_buffer_set_err_callback(
2559                         buffer, rte_eth_tx_buffer_drop_callback, NULL);
2560         }
2561
2562         return ret;
2563 }
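
/*
 * Editor's illustrative buffered-Tx setup: size the buffer with the
 * RTE_ETH_TX_BUFFER_SIZE() helper and count drops via the callback
 * defined above. The allocation parameters are assumptions.
 *
 *	uint64_t drops = 0;
 *	struct rte_eth_dev_tx_buffer *buf = rte_zmalloc_socket("tx_buf",
 *		RTE_ETH_TX_BUFFER_SIZE(32), 0, rte_socket_id());
 *	rte_eth_tx_buffer_init(buf, 32);
 *	rte_eth_tx_buffer_set_err_callback(buf,
 *		rte_eth_tx_buffer_count_callback, &drops);
 */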
2564
2565 int
2566 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
2567 {
2568         struct rte_eth_dev *dev;
2569         int ret;
2570
2571         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2572         dev = &rte_eth_devices[port_id];
2573
2574         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
2575
2576         /* Call driver to free pending mbufs. */
2577         ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
2578                                                free_cnt);
2579         return eth_err(port_id, ret);
2580 }
2581
2582 int
2583 rte_eth_promiscuous_enable(uint16_t port_id)
2584 {
2585         struct rte_eth_dev *dev;
2586         int diag = 0;
2587
2588         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2589         dev = &rte_eth_devices[port_id];
2590
2591         if (dev->data->promiscuous == 1)
2592                 return 0;
2593
2594         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP);
2595
2596         diag = (*dev->dev_ops->promiscuous_enable)(dev);
2597         dev->data->promiscuous = (diag == 0) ? 1 : 0;
2598
2599         return eth_err(port_id, diag);
2600 }
2601
2602 int
2603 rte_eth_promiscuous_disable(uint16_t port_id)
2604 {
2605         struct rte_eth_dev *dev;
2606         int diag = 0;
2607
2608         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2609         dev = &rte_eth_devices[port_id];
2610
2611         if (dev->data->promiscuous == 0)
2612                 return 0;
2613
2614         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP);
2615
2616         dev->data->promiscuous = 0;
2617         diag = (*dev->dev_ops->promiscuous_disable)(dev);
2618         if (diag != 0)
2619                 dev->data->promiscuous = 1;
2620
2621         return eth_err(port_id, diag);
2622 }
2623
2624 int
2625 rte_eth_promiscuous_get(uint16_t port_id)
2626 {
2627         struct rte_eth_dev *dev;
2628
2629         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2630         dev = &rte_eth_devices[port_id];
2631
2632         return dev->data->promiscuous;
2633 }
2634
2635 int
2636 rte_eth_allmulticast_enable(uint16_t port_id)
2637 {
2638         struct rte_eth_dev *dev;
2639         int diag;
2640
2641         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2642         dev = &rte_eth_devices[port_id];
2643
2644         if (dev->data->all_multicast == 1)
2645                 return 0;
2646
2647         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP);
2648         diag = (*dev->dev_ops->allmulticast_enable)(dev);
2649         dev->data->all_multicast = (diag == 0) ? 1 : 0;
2650
2651         return eth_err(port_id, diag);
2652 }
2653
2654 int
2655 rte_eth_allmulticast_disable(uint16_t port_id)
2656 {
2657         struct rte_eth_dev *dev;
2658         int diag;
2659
2660         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2661         dev = &rte_eth_devices[port_id];
2662
2663         if (dev->data->all_multicast == 0)
2664                 return 0;
2665
2666         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP);
2667         dev->data->all_multicast = 0;
2668         diag = (*dev->dev_ops->allmulticast_disable)(dev);
2669         if (diag != 0)
2670                 dev->data->all_multicast = 1;
2671
2672         return eth_err(port_id, diag);
2673 }
2674
2675 int
2676 rte_eth_allmulticast_get(uint16_t port_id)
2677 {
2678         struct rte_eth_dev *dev;
2679
2680         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2681         dev = &rte_eth_devices[port_id];
2682
2683         return dev->data->all_multicast;
2684 }
2685
2686 int
2687 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
2688 {
2689         struct rte_eth_dev *dev;
2690
2691         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2692         dev = &rte_eth_devices[port_id];
2693
2694         if (eth_link == NULL) {
2695                 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n",
2696                         port_id);
2697                 return -EINVAL;
2698         }
2699
2700         if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started)
2701                 rte_eth_linkstatus_get(dev, eth_link);
2702         else {
2703                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2704                 (*dev->dev_ops->link_update)(dev, 1);
2705                 *eth_link = dev->data->dev_link;
2706         }
2707
2708         return 0;
2709 }
2710
2711 int
2712 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
2713 {
2714         struct rte_eth_dev *dev;
2715
2716         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2717         dev = &rte_eth_devices[port_id];
2718
2719         if (eth_link == NULL) {
2720                 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n",
2721                         port_id);
2722                 return -EINVAL;
2723         }
2724
2725         if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started)
2726                 rte_eth_linkstatus_get(dev, eth_link);
2727         else {
2728                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2729                 (*dev->dev_ops->link_update)(dev, 0);
2730                 *eth_link = dev->data->dev_link;
2731         }
2732
2733         return 0;
2734 }
2735
2736 const char *
2737 rte_eth_link_speed_to_str(uint32_t link_speed)
2738 {
2739         switch (link_speed) {
2740         case ETH_SPEED_NUM_NONE: return "None";
2741         case ETH_SPEED_NUM_10M:  return "10 Mbps";
2742         case ETH_SPEED_NUM_100M: return "100 Mbps";
2743         case ETH_SPEED_NUM_1G:   return "1 Gbps";
2744         case ETH_SPEED_NUM_2_5G: return "2.5 Gbps";
2745         case ETH_SPEED_NUM_5G:   return "5 Gbps";
2746         case ETH_SPEED_NUM_10G:  return "10 Gbps";
2747         case ETH_SPEED_NUM_20G:  return "20 Gbps";
2748         case ETH_SPEED_NUM_25G:  return "25 Gbps";
2749         case ETH_SPEED_NUM_40G:  return "40 Gbps";
2750         case ETH_SPEED_NUM_50G:  return "50 Gbps";
2751         case ETH_SPEED_NUM_56G:  return "56 Gbps";
2752         case ETH_SPEED_NUM_100G: return "100 Gbps";
2753         case ETH_SPEED_NUM_200G: return "200 Gbps";
2754         case ETH_SPEED_NUM_UNKNOWN: return "Unknown";
2755         default: return "Invalid";
2756         }
2757 }
2758
2759 int
2760 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
2761 {
2762         if (str == NULL) {
2763                 RTE_ETHDEV_LOG(ERR, "Cannot convert link to NULL string\n");
2764                 return -EINVAL;
2765         }
2766
2767         if (len == 0) {
2768                 RTE_ETHDEV_LOG(ERR,
2769                         "Cannot convert link to string with zero size\n");
2770                 return -EINVAL;
2771         }
2772
2773         if (eth_link == NULL) {
2774                 RTE_ETHDEV_LOG(ERR, "Cannot convert to string from NULL link\n");
2775                 return -EINVAL;
2776         }
2777
2778         if (eth_link->link_status == ETH_LINK_DOWN)
2779                 return snprintf(str, len, "Link down");
2780         else
2781                 return snprintf(str, len, "Link up at %s %s %s",
2782                         rte_eth_link_speed_to_str(eth_link->link_speed),
2783                         (eth_link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
2784                         "FDX" : "HDX",
2785                         (eth_link->link_autoneg == ETH_LINK_AUTONEG) ?
2786                         "Autoneg" : "Fixed");
2787 }
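
/*
 * Editor's illustrative formatting call:
 *
 *	struct rte_eth_link link;
 *	char text[RTE_ETH_LINK_MAX_STR_LEN];
 *
 *	if (rte_eth_link_get_nowait(port_id, &link) == 0) {
 *		rte_eth_link_to_str(text, sizeof(text), &link);
 *		printf("Port %u: %s\n", port_id, text);
 *	}
 */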
2788
2789 int
2790 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
2791 {
2792         struct rte_eth_dev *dev;
2793
2794         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2795         dev = &rte_eth_devices[port_id];
2796
2797         if (stats == NULL) {
2798                 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u stats to NULL\n",
2799                         port_id);
2800                 return -EINVAL;
2801         }
2802
2803         memset(stats, 0, sizeof(*stats));
2804
2805         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
2806         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
2807         return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
2808 }
2809
2810 int
2811 rte_eth_stats_reset(uint16_t port_id)
2812 {
2813         struct rte_eth_dev *dev;
2814         int ret;
2815
2816         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2817         dev = &rte_eth_devices[port_id];
2818
2819         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
2820         ret = (*dev->dev_ops->stats_reset)(dev);
2821         if (ret != 0)
2822                 return eth_err(port_id, ret);
2823
2824         dev->data->rx_mbuf_alloc_failed = 0;
2825
2826         return 0;
2827 }
2828
2829 static inline int
2830 eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev)
2831 {
2832         uint16_t nb_rxqs, nb_txqs;
2833         int count;
2834
2835         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2836         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2837
2838         count = RTE_NB_STATS;
2839         if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) {
2840                 count += nb_rxqs * RTE_NB_RXQ_STATS;
2841                 count += nb_txqs * RTE_NB_TXQ_STATS;
2842         }
2843
2844         return count;
2845 }
2846
2847 static int
2848 eth_dev_get_xstats_count(uint16_t port_id)
2849 {
2850         struct rte_eth_dev *dev;
2851         int count;
2852
2853         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2854         dev = &rte_eth_devices[port_id];
2855         if (dev->dev_ops->xstats_get_names != NULL) {
2856                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
2857                 if (count < 0)
2858                         return eth_err(port_id, count);
2859         } else
2860                 count = 0;
2861
2863         count += eth_dev_get_xstats_basic_count(dev);
2864
2865         return count;
2866 }
2867
2868 int
2869 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
2870                 uint64_t *id)
2871 {
2872         int cnt_xstats, idx_xstat;
2873
2874         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2875
2876         if (xstat_name == NULL) {
2877                 RTE_ETHDEV_LOG(ERR,
2878                         "Cannot get ethdev port %u xstats ID from NULL xstat name\n",
2879                         port_id);
2880                 return -ENOMEM;
2881         }
2882
2883         if (id == NULL) {
2884                 RTE_ETHDEV_LOG(ERR,
2885                         "Cannot get ethdev port %u xstats ID to NULL\n",
2886                         port_id);
2887                 return -ENOMEM;
2888         }
2889
2890         /* Get count */
2891         cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
2892         if (cnt_xstats < 0) {
2893                 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n");
2894                 return -ENODEV;
2895         }
2896
2897         /* Get id-name lookup table */
2898         struct rte_eth_xstat_name xstats_names[cnt_xstats];
2899
2900         if (cnt_xstats != rte_eth_xstats_get_names_by_id(
2901                         port_id, xstats_names, cnt_xstats, NULL)) {
2902                 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup table\n");
2903                 return -1;
2904         }
2905
2906         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
2907                 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
2908                         *id = idx_xstat;
2909                         return 0;
2910                 }
2911         }
2912
2913         return -EINVAL;
2914 }
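
/*
 * Usage sketch (illustrative, assuming a valid and started port): resolve
 * an xstat id once by name, then poll the value cheaply by id.
 * "rx_good_packets" is one of the basic stats registered in
 * eth_dev_stats_strings above.
 *
 *	uint64_t id, value;
 *
 *	if (rte_eth_xstats_get_id_by_name(port_id, "rx_good_packets", &id) == 0 &&
 *	    rte_eth_xstats_get_by_id(port_id, &id, &value, 1) == 1)
 *		printf("rx_good_packets: %" PRIu64 "\n", value);
 */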
2915
2916 /* retrieve basic stats names */
2917 static int
2918 eth_basic_stats_get_names(struct rte_eth_dev *dev,
2919         struct rte_eth_xstat_name *xstats_names)
2920 {
2921         int cnt_used_entries = 0;
2922         uint32_t idx, id_queue;
2923         uint16_t num_q;
2924
2925         for (idx = 0; idx < RTE_NB_STATS; idx++) {
2926                 strlcpy(xstats_names[cnt_used_entries].name,
2927                         eth_dev_stats_strings[idx].name,
2928                         sizeof(xstats_names[0].name));
2929                 cnt_used_entries++;
2930         }
2931
2932         if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
2933                 return cnt_used_entries;
2934
2935         num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2936         for (id_queue = 0; id_queue < num_q; id_queue++) {
2937                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
2938                         snprintf(xstats_names[cnt_used_entries].name,
2939                                 sizeof(xstats_names[0].name),
2940                                 "rx_q%u_%s",
2941                                 id_queue, eth_dev_rxq_stats_strings[idx].name);
2942                         cnt_used_entries++;
2943                 }
2944
2945         }
2946         num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2947         for (id_queue = 0; id_queue < num_q; id_queue++) {
2948                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
2949                         snprintf(xstats_names[cnt_used_entries].name,
2950                                 sizeof(xstats_names[0].name),
2951                                 "tx_q%u_%s",
2952                                 id_queue, eth_dev_txq_stats_strings[idx].name);
2953                         cnt_used_entries++;
2954                 }
2955         }
2956         return cnt_used_entries;
2957 }
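
/*
 * Concrete example of the naming scheme built above: with
 * RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS set, two Rx queues and one Tx queue,
 * the basic stats names are followed by
 *
 *	rx_q0_packets, rx_q0_bytes, rx_q0_errors,
 *	rx_q1_packets, rx_q1_bytes, rx_q1_errors,
 *	tx_q0_packets, tx_q0_bytes
 *
 * i.e. all Rx queues first, ordered by queue then by counter.
 */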
2958
2959 /* retrieve ethdev extended statistics names */
2960 int
2961 rte_eth_xstats_get_names_by_id(uint16_t port_id,
2962         struct rte_eth_xstat_name *xstats_names, unsigned int size,
2963         uint64_t *ids)
2964 {
2965         struct rte_eth_xstat_name *xstats_names_copy;
2966         unsigned int no_basic_stat_requested = 1;
2967         unsigned int no_ext_stat_requested = 1;
2968         unsigned int expected_entries;
2969         unsigned int basic_count;
2970         struct rte_eth_dev *dev;
2971         unsigned int i;
2972         int ret;
2973
2974         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2975         dev = &rte_eth_devices[port_id];
2976
2977         basic_count = eth_dev_get_xstats_basic_count(dev);
2978         ret = eth_dev_get_xstats_count(port_id);
2979         if (ret < 0)
2980                 return ret;
2981         expected_entries = (unsigned int)ret;
2982
2983         /* Return max number of stats if no ids given */
2984         if (ids == NULL) {
2985                 if (xstats_names == NULL || size < expected_entries)
2986                         return expected_entries;
2987         }
2990
2991         if (ids && !xstats_names)
2992                 return -EINVAL;
2993
2994         if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
2995                 uint64_t ids_copy[size];
2996
2997                 for (i = 0; i < size; i++) {
2998                         if (ids[i] < basic_count) {
2999                                 no_basic_stat_requested = 0;
3000                                 break;
3001                         }
3002
3003                         /*
3004                          * Convert ids to xstats ids that PMD knows.
3005                          * ids known by user are basic + extended stats.
3006                          */
3007                         ids_copy[i] = ids[i] - basic_count;
3008                 }
3009
3010                 if (no_basic_stat_requested)
3011                         return (*dev->dev_ops->xstats_get_names_by_id)(dev,
3012                                         ids_copy, xstats_names, size);
3013         }
3014
3015         /* Retrieve all stats */
3016         if (!ids) {
3017                 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
3018                                 expected_entries);
3019                 if (num_stats < 0 || num_stats > (int)expected_entries)
3020                         return num_stats;
3021                 else
3022                         return expected_entries;
3023         }
3024
3025         xstats_names_copy = calloc(expected_entries,
3026                 sizeof(struct rte_eth_xstat_name));
3027
3028         if (!xstats_names_copy) {
3029                 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n");
3030                 return -ENOMEM;
3031         }
3032
3033         if (ids) {
3034                 for (i = 0; i < size; i++) {
3035                         if (ids[i] >= basic_count) {
3036                                 no_ext_stat_requested = 0;
3037                                 break;
3038                         }
3039                 }
3040         }
3041
3042         /* Fill xstats_names_copy structure */
3043         if (ids && no_ext_stat_requested) {
3044                 eth_basic_stats_get_names(dev, xstats_names_copy);
3045         } else {
3046                 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
3047                         expected_entries);
3048                 if (ret < 0) {
3049                         free(xstats_names_copy);
3050                         return ret;
3051                 }
3052         }
3053
3054         /* Filter stats */
3055         for (i = 0; i < size; i++) {
3056                 if (ids[i] >= expected_entries) {
3057                         RTE_ETHDEV_LOG(ERR, "Id value %" PRIu64 " is invalid\n", ids[i]);
3058                         free(xstats_names_copy);
3059                         return -1;
3060                 }
3061                 xstats_names[i] = xstats_names_copy[ids[i]];
3062         }
3063
3064         free(xstats_names_copy);
3065         return size;
3066 }
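
/*
 * Note on the id space used above: the application-visible ids are the
 * basic stats [0, basic_count) immediately followed by the driver's own
 * xstats, so ids are rebased by subtracting basic_count (the ids_copy
 * conversion) before reaching the PMD. For example, with basic_count == 8,
 * application id 10 is driver id 2.
 */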
3067
3068 int
3069 rte_eth_xstats_get_names(uint16_t port_id,
3070         struct rte_eth_xstat_name *xstats_names,
3071         unsigned int size)
3072 {
3073         struct rte_eth_dev *dev;
3074         int cnt_used_entries;
3075         int cnt_expected_entries;
3076         int cnt_driver_entries;
3077
3078         cnt_expected_entries = eth_dev_get_xstats_count(port_id);
3079         if (xstats_names == NULL || cnt_expected_entries < 0 ||
3080                         (int)size < cnt_expected_entries)
3081                 return cnt_expected_entries;
3082
3083         /* port_id checked in eth_dev_get_xstats_count() */
3084         dev = &rte_eth_devices[port_id];
3085
3086         cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names);
3087
3088         if (dev->dev_ops->xstats_get_names != NULL) {
3089                 /* If there are any driver-specific xstats, append them
3090                  * to end of list.
3091                  */
3092                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
3093                         dev,
3094                         xstats_names + cnt_used_entries,
3095                         size - cnt_used_entries);
3096                 if (cnt_driver_entries < 0)
3097                         return eth_err(port_id, cnt_driver_entries);
3098                 cnt_used_entries += cnt_driver_entries;
3099         }
3100
3101         return cnt_used_entries;
3102 }
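
/*
 * Typical two-call pattern (sketch, assuming a valid port): passing a
 * NULL array or a too-small size returns the required count without
 * filling anything, as implemented above.
 *
 *	int n = rte_eth_xstats_get_names(port_id, NULL, 0);
 *
 *	if (n > 0) {
 *		struct rte_eth_xstat_name *names = calloc(n, sizeof(*names));
 *
 *		if (names != NULL)
 *			n = rte_eth_xstats_get_names(port_id, names, n);
 *		free(names);
 *	}
 */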
3103
3105 static int
3106 eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
3107 {
3108         struct rte_eth_dev *dev;
3109         struct rte_eth_stats eth_stats;
3110         unsigned int count = 0, i, q;
3111         uint64_t val, *stats_ptr;
3112         uint16_t nb_rxqs, nb_txqs;
3113         int ret;
3114
3115         ret = rte_eth_stats_get(port_id, &eth_stats);
3116         if (ret < 0)
3117                 return ret;
3118
3119         dev = &rte_eth_devices[port_id];
3120
3121         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3122         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3123
3124         /* global stats */
3125         for (i = 0; i < RTE_NB_STATS; i++) {
3126                 stats_ptr = RTE_PTR_ADD(&eth_stats,
3127                                         eth_dev_stats_strings[i].offset);
3128                 val = *stats_ptr;
3129                 xstats[count++].value = val;
3130         }
3131
3132         if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
3133                 return count;
3134
3135         /* per-rxq stats */
3136         for (q = 0; q < nb_rxqs; q++) {
3137                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
3138                         stats_ptr = RTE_PTR_ADD(&eth_stats,
3139                                         eth_dev_rxq_stats_strings[i].offset +
3140                                         q * sizeof(uint64_t));
3141                         val = *stats_ptr;
3142                         xstats[count++].value = val;
3143                 }
3144         }
3145
3146         /* per-txq stats */
3147         for (q = 0; q < nb_txqs; q++) {
3148                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
3149                         stats_ptr = RTE_PTR_ADD(&eth_stats,
3150                                         eth_dev_txq_stats_strings[i].offset +
3151                                         q * sizeof(uint64_t));
3152                         val = *stats_ptr;
3153                         xstats[count++].value = val;
3154                 }
3155         }
3156         return count;
3157 }
3158
3159 /* retrieve ethdev extended statistics */
3160 int
3161 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
3162                          uint64_t *values, unsigned int size)
3163 {
3164         unsigned int no_basic_stat_requested = 1;
3165         unsigned int no_ext_stat_requested = 1;
3166         unsigned int num_xstats_filled;
3167         unsigned int basic_count;
3168         uint16_t expected_entries;
3169         struct rte_eth_dev *dev;
3170         unsigned int i;
3171         int ret;
3172
3173         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3174         dev = &rte_eth_devices[port_id];
3175
3176         ret = eth_dev_get_xstats_count(port_id);
3177         if (ret < 0)
3178                 return ret;
3179         expected_entries = (uint16_t)ret;
3180         struct rte_eth_xstat xstats[expected_entries];
3181         basic_count = eth_dev_get_xstats_basic_count(dev);
3182
3183         /* Return max number of stats if no ids given */
3184         if (ids == NULL) {
3185                 if (values == NULL || size < expected_entries)
3186                         return expected_entries;
3187         }
3190
3191         if (ids && !values)
3192                 return -EINVAL;
3193
3194         if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
3195                 uint64_t ids_copy[size];
3197
3198                 for (i = 0; i < size; i++) {
3199                         if (ids[i] < basic_count) {
3200                                 no_basic_stat_requested = 0;
3201                                 break;
3202                         }
3203
3204                         /*
3205                          * Convert ids to xstats ids that PMD knows.
3206                          * ids known by user are basic + extended stats.
3207                          */
3208                         ids_copy[i] = ids[i] - basic_count;
3209                 }
3210
3211                 if (no_basic_stat_requested)
3212                         return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
3213                                         values, size);
3214         }
3215
3216         if (ids) {
3217                 for (i = 0; i < size; i++) {
3218                         if (ids[i] >= basic_count) {
3219                                 no_ext_stat_requested = 0;
3220                                 break;
3221                         }
3222                 }
3223         }
3224
3225         /* Fill the xstats structure */
3226         if (ids && no_ext_stat_requested)
3227                 ret = eth_basic_stats_get(port_id, xstats);
3228         else
3229                 ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
3230
3231         if (ret < 0)
3232                 return ret;
3233         num_xstats_filled = (unsigned int)ret;
3234
3235         /* Return all stats */
3236         if (!ids) {
3237                 for (i = 0; i < num_xstats_filled; i++)
3238                         values[i] = xstats[i].value;
3239                 return expected_entries;
3240         }
3241
3242         /* Filter stats */
3243         for (i = 0; i < size; i++) {
3244                 if (ids[i] >= expected_entries) {
3245                         RTE_ETHDEV_LOG(ERR, "Id value %" PRIu64 " is invalid\n", ids[i]);
3246                         return -1;
3247                 }
3248                 values[i] = xstats[ids[i]].value;
3249         }
3250         return size;
3251 }
3252
3253 int
3254 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
3255         unsigned int n)
3256 {
3257         struct rte_eth_dev *dev;
3258         unsigned int count = 0, i;
3259         signed int xcount = 0;
3260         uint16_t nb_rxqs, nb_txqs;
3261         int ret;
3262
3263         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3264         dev = &rte_eth_devices[port_id];
3265
3266         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3267         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3268
3269         /* Return generic statistics */
3270         count = RTE_NB_STATS;
3271         if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS)
3272                 count += (nb_rxqs * RTE_NB_RXQ_STATS) + (nb_txqs * RTE_NB_TXQ_STATS);
3273
3274         /* implemented by the driver */
3275         if (dev->dev_ops->xstats_get != NULL) {
3276                 /* Retrieve the xstats from the driver at the end of the
3277                  * xstats struct.
3278                  */
3279                 xcount = (*dev->dev_ops->xstats_get)(dev,
3280                                      xstats ? xstats + count : NULL,
3281                                      (n > count) ? n - count : 0);
3282
3283                 if (xcount < 0)
3284                         return eth_err(port_id, xcount);
3285         }
3286
3287         if (n < count + xcount || xstats == NULL)
3288                 return count + xcount;
3289
3290         /* now fill the xstats structure */
3291         ret = eth_basic_stats_get(port_id, xstats);
3292         if (ret < 0)
3293                 return ret;
3294         count = ret;
3295
3296         for (i = 0; i < count; i++)
3297                 xstats[i].id = i;
3298         /* add an offset to driver-specific stats */
3299         for ( ; i < count + xcount; i++)
3300                 xstats[i].id += count;
3301
3302         return count + xcount;
3303 }
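
/*
 * Snapshot sketch pairing ids and values; handle_stat() stands in for
 * application logic, and the count is assumed stable between the two
 * calls (re-query if it differs):
 *
 *	int n = rte_eth_xstats_get(port_id, NULL, 0);
 *
 *	if (n > 0) {
 *		struct rte_eth_xstat *xs = calloc(n, sizeof(*xs));
 *
 *		if (xs != NULL && rte_eth_xstats_get(port_id, xs, n) == n) {
 *			int k;
 *
 *			for (k = 0; k < n; k++)
 *				handle_stat(xs[k].id, xs[k].value);
 *		}
 *		free(xs);
 *	}
 */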
3304
3305 /* reset ethdev extended statistics */
3306 int
3307 rte_eth_xstats_reset(uint16_t port_id)
3308 {
3309         struct rte_eth_dev *dev;
3310
3311         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3312         dev = &rte_eth_devices[port_id];
3313
3314         /* implemented by the driver */
3315         if (dev->dev_ops->xstats_reset != NULL)
3316                 return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev));
3317
3318         /* fallback to default */
3319         return rte_eth_stats_reset(port_id);
3320 }
3321
3322 static int
3323 eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id,
3324                 uint8_t stat_idx, uint8_t is_rx)
3325 {
3326         struct rte_eth_dev *dev;
3327
3328         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3329         dev = &rte_eth_devices[port_id];
3330
3331         if (is_rx && (queue_id >= dev->data->nb_rx_queues))
3332                 return -EINVAL;
3333
3334         if (!is_rx && (queue_id >= dev->data->nb_tx_queues))
3335                 return -EINVAL;
3336
3337         if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
3338                 return -EINVAL;
3339
3340         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
3341         return (*dev->dev_ops->queue_stats_mapping_set)(dev, queue_id, stat_idx, is_rx);
3342 }
3343
3344 int
3345 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
3346                 uint8_t stat_idx)
3347 {
3348         return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id,
3349                                                 tx_queue_id,
3350                                                 stat_idx, STAT_QMAP_TX));
3351 }
3352
3353 int
3354 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
3355                 uint8_t stat_idx)
3356 {
3357         return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id,
3358                                                 rx_queue_id,
3359                                                 stat_idx, STAT_QMAP_RX));
3360 }
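
/*
 * Sketch: devices with more queues than RTE_ETHDEV_QUEUE_STAT_CNTRS
 * counter slots must map the queues of interest explicitly (assumes the
 * PMD implements queue_stats_mapping_set). E.g. to count Rx queue 20 in
 * per-queue slot 0:
 *
 *	struct rte_eth_stats st;
 *	uint64_t q20_packets;
 *
 *	rte_eth_dev_set_rx_queue_stats_mapping(port_id, 20, 0);
 *	rte_eth_stats_get(port_id, &st);
 *	q20_packets = st.q_ipackets[0];
 */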
3361
3362 int
3363 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
3364 {
3365         struct rte_eth_dev *dev;
3366
3367         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3368         dev = &rte_eth_devices[port_id];
3369
3370         if (fw_version == NULL && fw_size > 0) {
3371                 RTE_ETHDEV_LOG(ERR,
3372                         "Cannot get ethdev port %u FW version to NULL when string size is non zero\n",
3373                         port_id);
3374                 return -EINVAL;
3375         }
3376
3377         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
3378         return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
3379                                                         fw_version, fw_size));
3380 }
3381
3382 int
3383 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
3384 {
3385         struct rte_eth_dev *dev;
3386         const struct rte_eth_desc_lim lim = {
3387                 .nb_max = UINT16_MAX,
3388                 .nb_min = 0,
3389                 .nb_align = 1,
3390                 .nb_seg_max = UINT16_MAX,
3391                 .nb_mtu_seg_max = UINT16_MAX,
3392         };
3393         int diag;
3394
3395         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3396         dev = &rte_eth_devices[port_id];
3397
3398         if (dev_info == NULL) {
3399                 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u info to NULL\n",
3400                         port_id);
3401                 return -EINVAL;
3402         }
3403
3404         /*
3405          * Init dev_info to known values first, so that fields the driver
3406          * callback does not fill in are returned zeroed rather than stale.
3407          */
3408         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3409         dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
3410
3411         dev_info->rx_desc_lim = lim;
3412         dev_info->tx_desc_lim = lim;
3413         dev_info->device = dev->device;
3414         dev_info->min_mtu = RTE_ETHER_MIN_MTU;
3415         dev_info->max_mtu = UINT16_MAX;
3416
3417         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
3418         diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info);
3419         if (diag != 0) {
3420                 /* Cleanup already filled in device information */
3421                 memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3422                 return eth_err(port_id, diag);
3423         }
3424
3425         /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */
3426         dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues,
3427                         RTE_MAX_QUEUES_PER_PORT);
3428         dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues,
3429                         RTE_MAX_QUEUES_PER_PORT);
3430
3431         dev_info->driver_name = dev->device->driver->name;
3432         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
3433         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
3434
3435         dev_info->dev_flags = &dev->data->dev_flags;
3436
3437         return 0;
3438 }
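
/*
 * Sketch of a common configure-time use: clamp the requested queue count
 * to what the device reports (nb_rxq is an assumed application variable):
 *
 *	struct rte_eth_dev_info info;
 *
 *	if (rte_eth_dev_info_get(port_id, &info) == 0)
 *		nb_rxq = RTE_MIN(nb_rxq, info.max_rx_queues);
 */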
3439
3440 int
3441 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3442                                  uint32_t *ptypes, int num)
3443 {
3444         int i, j;
3445         struct rte_eth_dev *dev;
3446         const uint32_t *all_ptypes;
3447
3448         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3449         dev = &rte_eth_devices[port_id];
3450
3451         if (ptypes == NULL && num > 0) {
3452                 RTE_ETHDEV_LOG(ERR,
3453                         "Cannot get ethdev port %u supported packet types to NULL when array size is non zero\n",
3454                         port_id);
3455                 return -EINVAL;
3456         }
3457
3458         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
3459         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3460
3461         if (!all_ptypes)
3462                 return 0;
3463
3464         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
3465                 if (all_ptypes[i] & ptype_mask) {
3466                         if (j < num)
3467                                 ptypes[j] = all_ptypes[i];
3468                         j++;
3469                 }
3470
3471         return j;
3472 }
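
/*
 * Two-call sketch restricted to L4 types; as coded above, the return
 * value is the number of matching types even when it exceeds the
 * supplied array size, so querying the size first is safe:
 *
 *	int n = rte_eth_dev_get_supported_ptypes(port_id,
 *			RTE_PTYPE_L4_MASK, NULL, 0);
 *
 *	if (n > 0) {
 *		uint32_t *ptypes = calloc(n, sizeof(*ptypes));
 *
 *		if (ptypes != NULL)
 *			n = rte_eth_dev_get_supported_ptypes(port_id,
 *					RTE_PTYPE_L4_MASK, ptypes, n);
 *		free(ptypes);
 *	}
 */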
3473
3474 int
3475 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3476                                  uint32_t *set_ptypes, unsigned int num)
3477 {
3478         const uint32_t valid_ptype_masks[] = {
3479                 RTE_PTYPE_L2_MASK,
3480                 RTE_PTYPE_L3_MASK,
3481                 RTE_PTYPE_L4_MASK,
3482                 RTE_PTYPE_TUNNEL_MASK,
3483                 RTE_PTYPE_INNER_L2_MASK,
3484                 RTE_PTYPE_INNER_L3_MASK,
3485                 RTE_PTYPE_INNER_L4_MASK,
3486         };
3487         const uint32_t *all_ptypes;
3488         struct rte_eth_dev *dev;
3489         uint32_t unused_mask;
3490         unsigned int i, j;
3491         int ret;
3492
3493         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3494         dev = &rte_eth_devices[port_id];
3495
3496         if (num > 0 && set_ptypes == NULL) {
3497                 RTE_ETHDEV_LOG(ERR,
3498                         "Cannot get ethdev port %u set packet types to NULL when array size is non zero\n",
3499                         port_id);
3500                 return -EINVAL;
3501         }
3502
3503         if (*dev->dev_ops->dev_supported_ptypes_get == NULL ||
3504                         *dev->dev_ops->dev_ptypes_set == NULL) {
3505                 ret = 0;
3506                 goto ptype_unknown;
3507         }
3508
3509         if (ptype_mask == 0) {
3510                 ret = (*dev->dev_ops->dev_ptypes_set)(dev,
3511                                 ptype_mask);
3512                 goto ptype_unknown;
3513         }
3514
3515         unused_mask = ptype_mask;
3516         for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) {
3517                 uint32_t mask = ptype_mask & valid_ptype_masks[i];
3518                 if (mask && mask != valid_ptype_masks[i]) {
3519                         ret = -EINVAL;
3520                         goto ptype_unknown;
3521                 }
3522                 unused_mask &= ~valid_ptype_masks[i];
3523         }
3524
3525         if (unused_mask) {
3526                 ret = -EINVAL;
3527                 goto ptype_unknown;
3528         }
3529
3530         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3531         if (all_ptypes == NULL) {
3532                 ret = 0;
3533                 goto ptype_unknown;
3534         }
3535
3536         /*
3537          * Accommodate as many set_ptypes as possible. If the supplied
3538          * set_ptypes array is insufficient, fill it partially.
3539          */
3540         for (i = 0, j = 0; set_ptypes != NULL &&
3541                                 (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) {
3542                 if (ptype_mask & all_ptypes[i]) {
3543                         if (j + 1 < num) {
3544                                 set_ptypes[j] = all_ptypes[i];
3545                                 j++;
3546                                 continue;
3547                         }
3548                         break;
3549                 }
3550         }
3551
3552         if (set_ptypes != NULL && j < num)
3553                 set_ptypes[j] = RTE_PTYPE_UNKNOWN;
3554
3555         return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask);
3556
3557 ptype_unknown:
3558         if (num > 0)
3559                 set_ptypes[0] = RTE_PTYPE_UNKNOWN;
3560
3561         return ret;
3562 }
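
/*
 * Sketch: an application that never reads mbuf->packet_type can ask the
 * PMD to skip ptype parsing entirely; a ptype_mask of 0
 * (RTE_PTYPE_UNKNOWN) takes the early dev_ptypes_set path above:
 *
 *	rte_eth_dev_set_ptypes(port_id, RTE_PTYPE_UNKNOWN, NULL, 0);
 */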
3563
3564 int
3565 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
3566 {
3567         struct rte_eth_dev *dev;
3568
3569         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3570         dev = &rte_eth_devices[port_id];
3571
3572         if (mac_addr == NULL) {
3573                 RTE_ETHDEV_LOG(ERR,
3574                         "Cannot get ethdev port %u MAC address to NULL\n",
3575                         port_id);
3576                 return -EINVAL;
3577         }
3578
3579         rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
3580
3581         return 0;
3582 }
3583
3584 int
3585 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
3586 {
3587         struct rte_eth_dev *dev;
3588
3589         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3590         dev = &rte_eth_devices[port_id];
3591
3592         if (mtu == NULL) {
3593                 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u MTU to NULL\n",
3594                         port_id);
3595                 return -EINVAL;
3596         }
3597
3598         *mtu = dev->data->mtu;
3599         return 0;
3600 }
3601
3602 int
3603 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
3604 {
3605         int ret;
3606         struct rte_eth_dev_info dev_info;
3607         struct rte_eth_dev *dev;
3608
3609         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3610         dev = &rte_eth_devices[port_id];
3611         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
3612
3613         /*
3614          * Check if the device supports dev_infos_get, if it does not
3615          * skip min_mtu/max_mtu validation here as this requires values
3616          * that are populated within the call to rte_eth_dev_info_get()
3617          * which relies on dev->dev_ops->dev_infos_get.
3618          */
3619         if (*dev->dev_ops->dev_infos_get != NULL) {
3620                 ret = rte_eth_dev_info_get(port_id, &dev_info);
3621                 if (ret != 0)
3622                         return ret;
3623
3624                 if (mtu < dev_info.min_mtu || mtu > dev_info.max_mtu)
3625                         return -EINVAL;
3626         }
3627
3628         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
3629         if (!ret)
3630                 dev->data->mtu = mtu;
3631
3632         return eth_err(port_id, ret);
3633 }
3634
3635 int
3636 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
3637 {
3638         struct rte_eth_dev *dev;
3639         int ret;
3640
3641         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3642         dev = &rte_eth_devices[port_id];
3643
3644         if (!(dev->data->dev_conf.rxmode.offloads &
3645               DEV_RX_OFFLOAD_VLAN_FILTER)) {
3646                 RTE_ETHDEV_LOG(ERR, "Port %u: vlan-filtering disabled\n",
3647                         port_id);
3648                 return -ENOSYS;
3649         }
3650
3651         if (vlan_id > 4095) {
3652                 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n",
3653                         port_id, vlan_id);
3654                 return -EINVAL;
3655         }
3656         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
3657
3658         ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
3659         if (ret == 0) {
3660                 struct rte_vlan_filter_conf *vfc;
3661                 int vidx;
3662                 int vbit;
3663
3664                 vfc = &dev->data->vlan_filter_conf;
3665                 vidx = vlan_id / 64;
3666                 vbit = vlan_id % 64;
3667
3668                 if (on)
3669                         vfc->ids[vidx] |= UINT64_C(1) << vbit;
3670                 else
3671                         vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
3672         }
3673
3674         return eth_err(port_id, ret);
3675 }
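
/*
 * The bitmap maintained above packs the 4096 possible VLAN ids into 64
 * uint64_t words: vlan_id 100, for instance, lands in ids[1] (100 / 64),
 * bit 36 (100 % 64). Usage sketch, assuming DEV_RX_OFFLOAD_VLAN_FILTER
 * was enabled in rxmode.offloads at configure time:
 *
 *	rte_eth_dev_vlan_filter(port_id, 100, 1);
 *
 * and with on == 0 to remove the filter again.
 */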
3676
3677 int
3678 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3679                                     int on)
3680 {
3681         struct rte_eth_dev *dev;
3682
3683         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3684         dev = &rte_eth_devices[port_id];
3685
3686         if (rx_queue_id >= dev->data->nb_rx_queues) {
3687                 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id);
3688                 return -EINVAL;
3689         }
3690
3691         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
3692         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
3693
3694         return 0;
3695 }
3696
3697 int
3698 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
3699                                 enum rte_vlan_type vlan_type,
3700                                 uint16_t tpid)
3701 {
3702         struct rte_eth_dev *dev;
3703
3704         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3705         dev = &rte_eth_devices[port_id];
3706
3707         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
3708         return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
3709                                                                tpid));
3710 }
3711
3712 int
3713 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
3714 {
3715         struct rte_eth_dev_info dev_info;
3716         struct rte_eth_dev *dev;
3717         int ret = 0;
3718         int mask = 0;
3719         int cur, org = 0;
3720         uint64_t orig_offloads;
3721         uint64_t dev_offloads;
3722         uint64_t new_offloads;
3723
3724         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3725         dev = &rte_eth_devices[port_id];
3726
3727         /* save original values in case of failure */
3728         orig_offloads = dev->data->dev_conf.rxmode.offloads;
3729         dev_offloads = orig_offloads;
3730
3731         /* check which option changed by application */
3732         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
3733         org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
3734         if (cur != org) {
3735                 if (cur)
3736                         dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
3737                 else
3738                         dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
3739                 mask |= ETH_VLAN_STRIP_MASK;
3740         }
3741
3742         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
3743         org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
3744         if (cur != org) {
3745                 if (cur)
3746                         dev_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3747                 else
3748                         dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
3749                 mask |= ETH_VLAN_FILTER_MASK;
3750         }
3751
3752         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
3753         org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND);
3754         if (cur != org) {
3755                 if (cur)
3756                         dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
3757                 else
3758                         dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
3759                 mask |= ETH_VLAN_EXTEND_MASK;
3760         }
3761
3762         cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD);
3763         org = !!(dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP);
3764         if (cur != org) {
3765                 if (cur)
3766                         dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
3767                 else
3768                         dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
3769                 mask |= ETH_QINQ_STRIP_MASK;
3770         }
3771
3772         /* no change */
3773         if (mask == 0)
3774                 return ret;
3775
3776         ret = rte_eth_dev_info_get(port_id, &dev_info);
3777         if (ret != 0)
3778                 return ret;
3779
3780         /* Rx VLAN offloading must be within its device capabilities */
3781         if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) {
3782                 new_offloads = dev_offloads & ~orig_offloads;
3783                 RTE_ETHDEV_LOG(ERR,
3784                         "Ethdev port_id=%u newly added VLAN offloads "
3785                         "0x%" PRIx64 " must be within Rx offloads capabilities "
3786                         "0x%" PRIx64 " in %s()\n",
3787                         port_id, new_offloads, dev_info.rx_offload_capa,
3788                         __func__);
3789                 return -EINVAL;
3790         }
3791
3792         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
3793         dev->data->dev_conf.rxmode.offloads = dev_offloads;
3794         ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
3795         if (ret) {
3796                 /* hit an error, restore original values */
3797                 dev->data->dev_conf.rxmode.offloads = orig_offloads;
3798         }
3799
3800         return eth_err(port_id, ret);
3801 }
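
/*
 * Sketch of the intended read-modify-write use together with
 * rte_eth_dev_get_vlan_offload() below:
 *
 *	int cur = rte_eth_dev_get_vlan_offload(port_id);
 *
 *	if (cur >= 0)
 *		rte_eth_dev_set_vlan_offload(port_id,
 *				cur | ETH_VLAN_STRIP_OFFLOAD);
 */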
3802
3803 int
3804 rte_eth_dev_get_vlan_offload(uint16_t port_id)
3805 {
3806         struct rte_eth_dev *dev;
3807         uint64_t *dev_offloads;
3808         int ret = 0;
3809
3810         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3811         dev = &rte_eth_devices[port_id];
3812         dev_offloads = &dev->data->dev_conf.rxmode.offloads;
3813
3814         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
3815                 ret |= ETH_VLAN_STRIP_OFFLOAD;
3816
3817         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
3818                 ret |= ETH_VLAN_FILTER_OFFLOAD;
3819
3820         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
3821                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
3822
3823         if (*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
3824                 ret |= ETH_QINQ_STRIP_OFFLOAD;
3825
3826         return ret;
3827 }
3828
3829 int
3830 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
3831 {
3832         struct rte_eth_dev *dev;
3833
3834         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3835         dev = &rte_eth_devices[port_id];
3836
3837         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
3838         return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
3839 }
3840
3841 int
3842 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3843 {
3844         struct rte_eth_dev *dev;
3845
3846         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3847         dev = &rte_eth_devices[port_id];
3848
3849         if (fc_conf == NULL) {
3850                 RTE_ETHDEV_LOG(ERR,
3851                         "Cannot get ethdev port %u flow control config to NULL\n",
3852                         port_id);
3853                 return -EINVAL;
3854         }
3855
3856         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
3857         memset(fc_conf, 0, sizeof(*fc_conf));
3858         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
3859 }
3860
3861 int
3862 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3863 {
3864         struct rte_eth_dev *dev;
3865
3866         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3867         dev = &rte_eth_devices[port_id];
3868
3869         if (fc_conf == NULL) {
3870                 RTE_ETHDEV_LOG(ERR,
3871                         "Cannot set ethdev port %u flow control from NULL config\n",
3872                         port_id);
3873                 return -EINVAL;
3874         }
3875
3876         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
3877                 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n");
3878                 return -EINVAL;
3879         }
3880
3881         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
3882         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
3883 }
3884
3885 int
3886 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
3887                                    struct rte_eth_pfc_conf *pfc_conf)
3888 {
3889         struct rte_eth_dev *dev;
3890
3891         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3892         dev = &rte_eth_devices[port_id];
3893
3894         if (pfc_conf == NULL) {
3895                 RTE_ETHDEV_LOG(ERR,
3896                         "Cannot set ethdev port %u priority flow control from NULL config\n",
3897                         port_id);
3898                 return -EINVAL;
3899         }
3900
3901         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
3902                 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
3903                 return -EINVAL;
3904         }
3905
3906         /* High water/low water validation is device-specific */
3907         if (*dev->dev_ops->priority_flow_ctrl_set)
3908                 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
3909                                         (dev, pfc_conf));
3910         return -ENOTSUP;
3911 }
3912
3913 static int
3914 eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
3915                         uint16_t reta_size)
3916 {
3917         uint16_t i, num;
3918
3919         num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
3920         for (i = 0; i < num; i++) {
3921                 if (reta_conf[i].mask)
3922                         return 0;
3923         }
3924
3925         return -EINVAL;
3926 }
3927
3928 static int
3929 eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
3930                          uint16_t reta_size,
3931                          uint16_t max_rxq)
3932 {
3933         uint16_t i, idx, shift;
3934
3935         if (max_rxq == 0) {
3936                 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n");
3937                 return -EINVAL;
3938         }
3939
3940         for (i = 0; i < reta_size; i++) {
3941                 idx = i / RTE_RETA_GROUP_SIZE;
3942                 shift = i % RTE_RETA_GROUP_SIZE;
3943                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
3944                         (reta_conf[idx].reta[shift] >= max_rxq)) {
3945                         RTE_ETHDEV_LOG(ERR,
3946                                 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n",
3947                                 idx, shift,
3948                                 reta_conf[idx].reta[shift], max_rxq);
3949                         return -EINVAL;
3950                 }
3951         }
3952
3953         return 0;
3954 }
3955
3956 int
3957 rte_eth_dev_rss_reta_update(uint16_t port_id,
3958                             struct rte_eth_rss_reta_entry64 *reta_conf,
3959                             uint16_t reta_size)
3960 {
3961         struct rte_eth_dev *dev;
3962         int ret;
3963
3964         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3965         dev = &rte_eth_devices[port_id];
3966
3967         if (reta_conf == NULL) {
3968                 RTE_ETHDEV_LOG(ERR,
3969                         "Cannot update ethdev port %u RSS RETA to NULL\n",
3970                         port_id);
3971                 return -EINVAL;
3972         }
3973
3974         if (reta_size == 0) {
3975                 RTE_ETHDEV_LOG(ERR,
3976                         "Cannot update ethdev port %u RSS RETA with zero size\n",
3977                         port_id);
3978                 return -EINVAL;
3979         }
3980
3981         /* Check mask bits */
3982         ret = eth_check_reta_mask(reta_conf, reta_size);
3983         if (ret < 0)
3984                 return ret;
3985
3986         /* Check entry value */
3987         ret = eth_check_reta_entry(reta_conf, reta_size,
3988                                 dev->data->nb_rx_queues);
3989         if (ret < 0)
3990                 return ret;
3991
3992         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
3993         return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
3994                                                              reta_size));
3995 }
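
/*
 * Sketch: spread the redirection table round-robin over nb_q queues.
 * reta_size and nb_q are assumed application variables; reta_size comes
 * from dev_info.reta_size and is a multiple of RTE_RETA_GROUP_SIZE:
 *
 *	struct rte_eth_rss_reta_entry64 conf[reta_size / RTE_RETA_GROUP_SIZE];
 *	uint16_t i;
 *
 *	memset(conf, 0, sizeof(conf));
 *	for (i = 0; i < reta_size; i++) {
 *		conf[i / RTE_RETA_GROUP_SIZE].mask |=
 *				UINT64_C(1) << (i % RTE_RETA_GROUP_SIZE);
 *		conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *				i % nb_q;
 *	}
 *	rte_eth_dev_rss_reta_update(port_id, conf, reta_size);
 */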
3996
3997 int
3998 rte_eth_dev_rss_reta_query(uint16_t port_id,
3999                            struct rte_eth_rss_reta_entry64 *reta_conf,
4000                            uint16_t reta_size)
4001 {
4002         struct rte_eth_dev *dev;
4003         int ret;
4004
4005         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4006         dev = &rte_eth_devices[port_id];
4007
4008         if (reta_conf == NULL) {
4009                 RTE_ETHDEV_LOG(ERR,
4010                         "Cannot query ethdev port %u RSS RETA from NULL config\n",
4011                         port_id);
4012                 return -EINVAL;
4013         }
4014
4015         /* Check mask bits */
4016         ret = eth_check_reta_mask(reta_conf, reta_size);
4017         if (ret < 0)
4018                 return ret;
4019
4020         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
4021         return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
4022                                                             reta_size));
4023 }
4024
4025 int
4026 rte_eth_dev_rss_hash_update(uint16_t port_id,
4027                             struct rte_eth_rss_conf *rss_conf)
4028 {
4029         struct rte_eth_dev *dev;
4030         struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
4031         int ret;
4032
4033         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4034         dev = &rte_eth_devices[port_id];
4035
4036         if (rss_conf == NULL) {
4037                 RTE_ETHDEV_LOG(ERR,
4038                         "Cannot update ethdev port %u RSS hash from NULL config\n",
4039                         port_id);
4040                 return -EINVAL;
4041         }
4042
4043         ret = rte_eth_dev_info_get(port_id, &dev_info);
4044         if (ret != 0)
4045                 return ret;
4046
4047         rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf);
4048         if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
4049             dev_info.flow_type_rss_offloads) {
4050                 RTE_ETHDEV_LOG(ERR,
4051                         "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
4052                         port_id, rss_conf->rss_hf,
4053                         dev_info.flow_type_rss_offloads);
4054                 return -EINVAL;
4055         }
4056         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
4057         return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
4058                                                                  rss_conf));
4059 }
4060
4061 int
4062 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
4063                               struct rte_eth_rss_conf *rss_conf)
4064 {
4065         struct rte_eth_dev *dev;
4066
4067         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4068         dev = &rte_eth_devices[port_id];
4069
4070         if (rss_conf == NULL) {
4071                 RTE_ETHDEV_LOG(ERR,
4072                         "Cannot get ethdev port %u RSS hash config to NULL\n",
4073                         port_id);
4074                 return -EINVAL;
4075         }
4076
4077         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
4078         return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
4079                                                                    rss_conf));
4080 }
4081
4082 int
4083 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
4084                                 struct rte_eth_udp_tunnel *udp_tunnel)
4085 {
4086         struct rte_eth_dev *dev;
4087
4088         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4089         dev = &rte_eth_devices[port_id];
4090
4091         if (udp_tunnel == NULL) {
4092                 RTE_ETHDEV_LOG(ERR,
4093                         "Cannot add ethdev port %u UDP tunnel port from NULL UDP tunnel\n",
4094                         port_id);
4095                 return -EINVAL;
4096         }
4097
4098         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
4099                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4100                 return -EINVAL;
4101         }
4102
4103         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
4104         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
4105                                                                 udp_tunnel));
4106 }
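
/*
 * Sketch: register the IANA-assigned VXLAN port (4789) so the device can
 * recognize the tunnel on ingress:
 *
 *	struct rte_eth_udp_tunnel tunnel = {
 *		.udp_port = 4789,
 *		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *	};
 *
 *	rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
 */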
4107
4108 int
4109 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
4110                                    struct rte_eth_udp_tunnel *udp_tunnel)
4111 {
4112         struct rte_eth_dev *dev;
4113
4114         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4115         dev = &rte_eth_devices[port_id];
4116
4117         if (udp_tunnel == NULL) {
4118                 RTE_ETHDEV_LOG(ERR,
4119                         "Cannot delete ethdev port %u UDP tunnel port from NULL UDP tunnel\n",
4120                         port_id);
4121                 return -EINVAL;
4122         }
4123
4124         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
4125                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4126                 return -EINVAL;
4127         }
4128
4129         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
4130         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
4131                                                                 udp_tunnel));
4132 }
4133
4134 int
4135 rte_eth_led_on(uint16_t port_id)
4136 {
4137         struct rte_eth_dev *dev;
4138
4139         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4140         dev = &rte_eth_devices[port_id];
4141
4142         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
4143         return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
4144 }
4145
4146 int
4147 rte_eth_led_off(uint16_t port_id)
4148 {
4149         struct rte_eth_dev *dev;
4150
4151         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4152         dev = &rte_eth_devices[port_id];
4153
4154         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
4155         return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
4156 }
4157
4158 int
4159 rte_eth_fec_get_capability(uint16_t port_id,
4160                            struct rte_eth_fec_capa *speed_fec_capa,
4161                            unsigned int num)
4162 {
4163         struct rte_eth_dev *dev;
4164         int ret;
4165
4166         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4167         dev = &rte_eth_devices[port_id];
4168
4169         if (speed_fec_capa == NULL && num > 0) {
4170                 RTE_ETHDEV_LOG(ERR,
4171                         "Cannot get ethdev port %u FEC capability to NULL when array size is non zero\n",
4172                         port_id);
4173                 return -EINVAL;
4174         }
4175
4176         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get_capability, -ENOTSUP);
4177         ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num);
4178
4179         return ret;
4180 }
4181
4182 int
4183 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa)
4184 {
4185         struct rte_eth_dev *dev;
4186
4187         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4188         dev = &rte_eth_devices[port_id];
4189
4190         if (fec_capa == NULL) {
4191                 RTE_ETHDEV_LOG(ERR,
4192                         "Cannot get ethdev port %u current FEC mode to NULL\n",
4193                         port_id);
4194                 return -EINVAL;
4195         }
4196
4197         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get, -ENOTSUP);
4198         return eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa));
4199 }
4200
4201 int
4202 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa)
4203 {
4204         struct rte_eth_dev *dev;
4205
4206         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4207         dev = &rte_eth_devices[port_id];
4208
4209         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_set, -ENOTSUP);
4210         return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa));
4211 }
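
/*
 * Sketch, assuming the RTE_ETH_FEC_MODE_CAPA_MASK() helper from
 * rte_ethdev.h: request RS FEC if any advertised speed supports it
 * (a stricter application would match capa[i].speed to the live link):
 *
 *	struct rte_eth_fec_capa capa[8];
 *	int i, n = rte_eth_fec_get_capability(port_id, capa, RTE_DIM(capa));
 *
 *	for (i = 0; i < n && i < (int)RTE_DIM(capa); i++)
 *		if (capa[i].capa & RTE_ETH_FEC_MODE_CAPA_MASK(RS)) {
 *			rte_eth_fec_set(port_id, RTE_ETH_FEC_MODE_CAPA_MASK(RS));
 *			break;
 *		}
 */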
4212
4213 /*
4214  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
4215  * an empty spot.
4216  */
4217 static int
4218 eth_dev_get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
4219 {
4220         struct rte_eth_dev_info dev_info;
4221         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4222         unsigned i;
4223         int ret;
4224
4225         ret = rte_eth_dev_info_get(port_id, &dev_info);
4226         if (ret != 0)
4227                 return -1;
4228
4229         for (i = 0; i < dev_info.max_mac_addrs; i++)
4230                 if (memcmp(addr, &dev->data->mac_addrs[i],
4231                                 RTE_ETHER_ADDR_LEN) == 0)
4232                         return i;
4233
4234         return -1;
4235 }
4236
4237 static const struct rte_ether_addr null_mac_addr;
4238
4239 int
4240 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
4241                         uint32_t pool)
4242 {
4243         struct rte_eth_dev *dev;
4244         int index;
4245         uint64_t pool_mask;
4246         int ret;
4247
4248         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4249         dev = &rte_eth_devices[port_id];
4250
4251         if (addr == NULL) {
4252                 RTE_ETHDEV_LOG(ERR,
4253                         "Cannot add ethdev port %u MAC address from NULL address\n",
4254                         port_id);
4255                 return -EINVAL;
4256         }
4257
4258         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
4259
4260         if (rte_is_zero_ether_addr(addr)) {
4261                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
4262                         port_id);
4263                 return -EINVAL;
4264         }
4265         if (pool >= ETH_64_POOLS) {
4266                 RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", ETH_64_POOLS - 1);
4267                 return -EINVAL;
4268         }
4269
4270         index = eth_dev_get_mac_addr_index(port_id, addr);
4271         if (index < 0) {
4272                 index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr);
4273                 if (index < 0) {
4274                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
4275                                 port_id);
4276                         return -ENOSPC;
4277                 }
4278         } else {
4279                 pool_mask = dev->data->mac_pool_sel[index];
4280
4281                 /* If both the MAC address and pool are already set, do nothing */
4282                 if (pool_mask & (1ULL << pool))
4283                         return 0;
4284         }
4285
4286         /* Update NIC */
4287         ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
4288
4289         if (ret == 0) {
4290                 /* Update address in NIC data structure */
4291                 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]);
4292
4293                 /* Update pool bitmap in NIC data structure */
4294                 dev->data->mac_pool_sel[index] |= (1ULL << pool);
4295         }
4296
4297         return eth_err(port_id, ret);
4298 }
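
/*
 * Sketch: accept one extra (locally administered) unicast address; the
 * pool argument only matters on VMDq-capable devices, others pass 0:
 *
 *	struct rte_ether_addr extra = {
 *		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 *	};
 *
 *	rte_eth_dev_mac_addr_add(port_id, &extra, 0);
 */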
4299
4300 int
4301 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr)
4302 {
4303         struct rte_eth_dev *dev;
4304         int index;
4305
4306         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4307         dev = &rte_eth_devices[port_id];
4308
4309         if (addr == NULL) {
4310                 RTE_ETHDEV_LOG(ERR,
4311                         "Cannot remove ethdev port %u MAC address from NULL address\n",
4312                         port_id);
4313                 return -EINVAL;
4314         }
4315
4316         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
4317
4318         index = eth_dev_get_mac_addr_index(port_id, addr);
4319         if (index == 0) {
4320                 RTE_ETHDEV_LOG(ERR,
4321                         "Port %u: Cannot remove default MAC address\n",
4322                         port_id);
4323                 return -EADDRINUSE;
4324         } else if (index < 0)
4325                 return 0;  /* Do nothing if address wasn't found */
4326
4327         /* Update NIC */
4328         (*dev->dev_ops->mac_addr_remove)(dev, index);
4329
4330         /* Update address in NIC data structure */
4331         rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
4332
4333         /* reset pool bitmap */
4334         dev->data->mac_pool_sel[index] = 0;
4335
4336         return 0;
4337 }
4338
4339 int
4340 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr)
4341 {
4342         struct rte_eth_dev *dev;
4343         int ret;
4344
4345         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4346         dev = &rte_eth_devices[port_id];
4347
4348         if (addr == NULL) {
4349                 RTE_ETHDEV_LOG(ERR,
4350                         "Cannot set ethdev port %u default MAC address from NULL address\n",
4351                         port_id);
4352                 return -EINVAL;
4353         }
4354
4355         if (!rte_is_valid_assigned_ether_addr(addr))
4356                 return -EINVAL;
4357
4358         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
4359
4360         ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
4361         if (ret < 0)
4362                 return ret;
4363
4364         /* Update default address in NIC data structure */
4365         rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
4366
4367         return 0;
4368 }
4369
4371 /*
4372  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
4373  * an empty spot.
4374  */
4375 static int
4376 eth_dev_get_hash_mac_addr_index(uint16_t port_id,
4377                 const struct rte_ether_addr *addr)
4378 {
4379         struct rte_eth_dev_info dev_info;
4380         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4381         unsigned i;
4382         int ret;
4383
4384         ret = rte_eth_dev_info_get(port_id, &dev_info);
4385         if (ret != 0)
4386                 return -1;
4387
4388         if (!dev->data->hash_mac_addrs)
4389                 return -1;
4390
4391         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
4392                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
4393                         RTE_ETHER_ADDR_LEN) == 0)
4394                         return i;
4395
4396         return -1;
4397 }
4398
4399 int
4400 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
4401                                 uint8_t on)
4402 {
4403         int index;
4404         int ret;
4405         struct rte_eth_dev *dev;
4406
4407         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4408         dev = &rte_eth_devices[port_id];
4409
4410         if (addr == NULL) {
4411                 RTE_ETHDEV_LOG(ERR,
4412                         "Cannot set ethdev port %u unicast hash table from NULL address\n",
4413                         port_id);
4414                 return -EINVAL;
4415         }
4416
4417         if (rte_is_zero_ether_addr(addr)) {
4418                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
4419                         port_id);
4420                 return -EINVAL;
4421         }
4422
4423         index = eth_dev_get_hash_mac_addr_index(port_id, addr);
4424         /* Check if it's already there, and do nothing */
4425         if ((index >= 0) && on)
4426                 return 0;
4427
4428         if (index < 0) {
4429                 if (!on) {
4430                         RTE_ETHDEV_LOG(ERR,
4431                                 "Port %u: the MAC address was not set in UTA\n",
4432                                 port_id);
4433                         return -EINVAL;
4434                 }
4435
4436                 index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr);
4437                 if (index < 0) {
4438                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
4439                                 port_id);
4440                         return -ENOSPC;
4441                 }
4442         }
4443
4444         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
4445         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
4446         if (ret == 0) {
4447                 /* Update address in NIC data structure */
4448                 if (on)
4449                         rte_ether_addr_copy(addr,
4450                                         &dev->data->hash_mac_addrs[index]);
4451                 else
4452                         rte_ether_addr_copy(&null_mac_addr,
4453                                         &dev->data->hash_mac_addrs[index]);
4454         }
4455
4456         return eth_err(port_id, ret);
4457 }
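
/*
 * Usage sketch (illustrative, not part of this file): add one address to the
 * unicast hash table so frames to it are accepted without consuming a perfect
 * filter slot. Only some PMDs implement uc_hash_table_set; vf_mac and port_id
 * are assumptions of the example.
 *
 *        int rc = rte_eth_dev_uc_hash_table_set(port_id, &vf_mac, 1);
 *        if (rc == -ENOTSUP)
 *                printf("port %u: no unicast hash table support\n", port_id);
 */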
4458
4459 int
4460 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
4461 {
4462         struct rte_eth_dev *dev;
4463
4464         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4465         dev = &rte_eth_devices[port_id];
4466
4467         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
4468         return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
4469                                                                        on));
4470 }
4471
4472 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
4473                                         uint16_t tx_rate)
4474 {
4475         struct rte_eth_dev *dev;
4476         struct rte_eth_dev_info dev_info;
4477         struct rte_eth_link link;
4478         int ret;
4479
4480         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4481         dev = &rte_eth_devices[port_id];
4482
4483         ret = rte_eth_dev_info_get(port_id, &dev_info);
4484         if (ret != 0)
4485                 return ret;
4486
4487         link = dev->data->dev_link;
4488
        if (queue_idx >= dev_info.max_tx_queues) {
                RTE_ETHDEV_LOG(ERR,
                        "Set queue rate limit: port %u: invalid queue ID=%u\n",
4492                         port_id, queue_idx);
4493                 return -EINVAL;
4494         }
4495
4496         if (tx_rate > link.link_speed) {
4497                 RTE_ETHDEV_LOG(ERR,
4498                         "Set queue rate limit:invalid tx_rate=%u, bigger than link speed= %d\n",
4499                         tx_rate, link.link_speed);
4500                 return -EINVAL;
4501         }
4502
4503         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
4504         return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
4505                                                         queue_idx, tx_rate));
4506 }
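
/*
 * Usage sketch (illustrative, not part of this file): cap Tx queue 0 of an
 * assumed port_id at 1000 Mbit/s. tx_rate uses the same unit as link_speed
 * (Mbit/s), which is why it is validated against the link speed above.
 *
 *        int rc = rte_eth_set_queue_rate_limit(port_id, 0, 1000);
 *        if (rc != 0)
 *                printf("rate limit failed: %s\n", rte_strerror(-rc));
 */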
4507
4508 RTE_INIT(eth_dev_init_cb_lists)
4509 {
4510         uint16_t i;
4511
4512         for (i = 0; i < RTE_MAX_ETHPORTS; i++)
4513                 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
4514 }
4515
4516 int
4517 rte_eth_dev_callback_register(uint16_t port_id,
4518                         enum rte_eth_event_type event,
4519                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4520 {
4521         struct rte_eth_dev *dev;
4522         struct rte_eth_dev_callback *user_cb;
4523         uint16_t next_port;
4524         uint16_t last_port;
4525
4526         if (cb_fn == NULL) {
4527                 RTE_ETHDEV_LOG(ERR,
4528                         "Cannot register ethdev port %u callback from NULL\n",
4529                         port_id);
4530                 return -EINVAL;
4531         }
4532
4533         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4534                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
4535                 return -EINVAL;
4536         }
4537
4538         if (port_id == RTE_ETH_ALL) {
4539                 next_port = 0;
4540                 last_port = RTE_MAX_ETHPORTS - 1;
4541         } else {
4542                 next_port = last_port = port_id;
4543         }
4544
4545         rte_spinlock_lock(&eth_dev_cb_lock);
4546
4547         do {
4548                 dev = &rte_eth_devices[next_port];
4549
4550                 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
4551                         if (user_cb->cb_fn == cb_fn &&
4552                                 user_cb->cb_arg == cb_arg &&
4553                                 user_cb->event == event) {
4554                                 break;
4555                         }
4556                 }
4557
4558                 /* create a new callback. */
4559                 if (user_cb == NULL) {
4560                         user_cb = rte_zmalloc("INTR_USER_CALLBACK",
4561                                 sizeof(struct rte_eth_dev_callback), 0);
4562                         if (user_cb != NULL) {
4563                                 user_cb->cb_fn = cb_fn;
4564                                 user_cb->cb_arg = cb_arg;
4565                                 user_cb->event = event;
4566                                 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
4567                                                   user_cb, next);
4568                         } else {
4569                                 rte_spinlock_unlock(&eth_dev_cb_lock);
4570                                 rte_eth_dev_callback_unregister(port_id, event,
4571                                                                 cb_fn, cb_arg);
4572                                 return -ENOMEM;
4573                         }
4574
4575                 }
4576         } while (++next_port <= last_port);
4577
4578         rte_spinlock_unlock(&eth_dev_cb_lock);
4579         return 0;
4580 }
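
/*
 * Usage sketch (illustrative, not part of this file): register a link status
 * change callback on every port at once via RTE_ETH_ALL. The callback runs
 * from the interrupt thread, so it should avoid blocking work.
 *
 *        static int
 *        lsc_event_cb(uint16_t port, enum rte_eth_event_type event,
 *                        void *cb_arg, void *ret_param)
 *        {
 *                RTE_SET_USED(cb_arg);
 *                RTE_SET_USED(ret_param);
 *                printf("port %u: event %d\n", port, (int)event);
 *                return 0;
 *        }
 *
 *        rte_eth_dev_callback_register(RTE_ETH_ALL, RTE_ETH_EVENT_INTR_LSC,
 *                        lsc_event_cb, NULL);
 */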
4581
4582 int
4583 rte_eth_dev_callback_unregister(uint16_t port_id,
4584                         enum rte_eth_event_type event,
4585                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4586 {
4587         int ret;
4588         struct rte_eth_dev *dev;
4589         struct rte_eth_dev_callback *cb, *next;
4590         uint16_t next_port;
4591         uint16_t last_port;
4592
4593         if (cb_fn == NULL) {
4594                 RTE_ETHDEV_LOG(ERR,
4595                         "Cannot unregister ethdev port %u callback from NULL\n",
4596                         port_id);
4597                 return -EINVAL;
4598         }
4599
4600         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4601                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
4602                 return -EINVAL;
4603         }
4604
4605         if (port_id == RTE_ETH_ALL) {
4606                 next_port = 0;
4607                 last_port = RTE_MAX_ETHPORTS - 1;
4608         } else {
4609                 next_port = last_port = port_id;
4610         }
4611
4612         rte_spinlock_lock(&eth_dev_cb_lock);
4613
4614         do {
4615                 dev = &rte_eth_devices[next_port];
4616                 ret = 0;
4617                 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
4618                      cb = next) {
4619
4620                         next = TAILQ_NEXT(cb, next);
4621
4622                         if (cb->cb_fn != cb_fn || cb->event != event ||
4623                             (cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
4624                                 continue;
4625
4626                         /*
4627                          * if this callback is not executing right now,
4628                          * then remove it.
4629                          */
4630                         if (cb->active == 0) {
4631                                 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
4632                                 rte_free(cb);
4633                         } else {
4634                                 ret = -EAGAIN;
4635                         }
4636                 }
4637         } while (++next_port <= last_port);
4638
4639         rte_spinlock_unlock(&eth_dev_cb_lock);
4640         return ret;
4641 }
4642
4643 int
4644 rte_eth_dev_callback_process(struct rte_eth_dev *dev,
4645         enum rte_eth_event_type event, void *ret_param)
4646 {
4647         struct rte_eth_dev_callback *cb_lst;
4648         struct rte_eth_dev_callback dev_cb;
4649         int rc = 0;
4650
4651         rte_spinlock_lock(&eth_dev_cb_lock);
4652         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
4653                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
4654                         continue;
4655                 dev_cb = *cb_lst;
4656                 cb_lst->active = 1;
4657                 if (ret_param != NULL)
4658                         dev_cb.ret_param = ret_param;
4659
4660                 rte_spinlock_unlock(&eth_dev_cb_lock);
4661                 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
4662                                 dev_cb.cb_arg, dev_cb.ret_param);
4663                 rte_spinlock_lock(&eth_dev_cb_lock);
4664                 cb_lst->active = 0;
4665         }
4666         rte_spinlock_unlock(&eth_dev_cb_lock);
4667         return rc;
4668 }
4669
4670 void
4671 rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
4672 {
4673         if (dev == NULL)
4674                 return;
4675
4676         rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);
4677
4678         dev->state = RTE_ETH_DEV_ATTACHED;
4679 }
4680
4681 int
4682 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
4683 {
4684         uint32_t vec;
4685         struct rte_eth_dev *dev;
4686         struct rte_intr_handle *intr_handle;
4687         uint16_t qid;
4688         int rc;
4689
4690         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4691         dev = &rte_eth_devices[port_id];
4692
4693         if (!dev->intr_handle) {
4694                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4695                 return -ENOTSUP;
4696         }
4697
4698         intr_handle = dev->intr_handle;
4699         if (!intr_handle->intr_vec) {
4700                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4701                 return -EPERM;
4702         }
4703
4704         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
4705                 vec = intr_handle->intr_vec[qid];
4706                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4707                 if (rc && rc != -EEXIST) {
4708                         RTE_ETHDEV_LOG(ERR,
4709                                 "p %u q %u rx ctl error op %d epfd %d vec %u\n",
4710                                 port_id, qid, op, epfd, vec);
4711                 }
4712         }
4713
4714         return 0;
4715 }
4716
4717 int
4718 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
4719 {
4720         struct rte_intr_handle *intr_handle;
4721         struct rte_eth_dev *dev;
4722         unsigned int efd_idx;
4723         uint32_t vec;
4724         int fd;
4725
4726         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
4727         dev = &rte_eth_devices[port_id];
4728
4729         if (queue_id >= dev->data->nb_rx_queues) {
4730                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4731                 return -1;
4732         }
4733
4734         if (!dev->intr_handle) {
4735                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4736                 return -1;
4737         }
4738
4739         intr_handle = dev->intr_handle;
4740         if (!intr_handle->intr_vec) {
4741                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4742                 return -1;
4743         }
4744
4745         vec = intr_handle->intr_vec[queue_id];
4746         efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
4747                 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
4748         fd = intr_handle->efds[efd_idx];
4749
4750         return fd;
4751 }
4752
4753 static inline int
4754 eth_dev_dma_mzone_name(char *name, size_t len, uint16_t port_id, uint16_t queue_id,
4755                 const char *ring_name)
4756 {
4757         return snprintf(name, len, "eth_p%d_q%d_%s",
4758                         port_id, queue_id, ring_name);
4759 }
4760
4761 const struct rte_memzone *
4762 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
4763                          uint16_t queue_id, size_t size, unsigned align,
4764                          int socket_id)
4765 {
4766         char z_name[RTE_MEMZONE_NAMESIZE];
4767         const struct rte_memzone *mz;
4768         int rc;
4769
4770         rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
4771                         queue_id, ring_name);
4772         if (rc >= RTE_MEMZONE_NAMESIZE) {
4773                 RTE_ETHDEV_LOG(ERR, "ring name too long\n");
4774                 rte_errno = ENAMETOOLONG;
4775                 return NULL;
4776         }
4777
4778         mz = rte_memzone_lookup(z_name);
4779         if (mz) {
4780                 if ((socket_id != SOCKET_ID_ANY && socket_id != mz->socket_id) ||
4781                                 size > mz->len ||
4782                                 ((uintptr_t)mz->addr & (align - 1)) != 0) {
4783                         RTE_ETHDEV_LOG(ERR,
4784                                 "memzone %s does not justify the requested attributes\n",
4785                                 mz->name);
4786                         return NULL;
4787                 }
4788
4789                 return mz;
4790         }
4791
4792         return rte_memzone_reserve_aligned(z_name, size, socket_id,
4793                         RTE_MEMZONE_IOVA_CONTIG, align);
4794 }
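
/*
 * Usage sketch (illustrative, not part of this file): a PMD's rx_queue_setup
 * reserving descriptor ring memory; dev, queue_idx, ring_size and socket_id
 * come from the setup call. On failure rte_errno has been set, either here
 * or by the memzone allocator.
 *
 *        const struct rte_memzone *mz;
 *
 *        mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
 *                        ring_size, RTE_CACHE_LINE_SIZE, socket_id);
 *        if (mz == NULL)
 *                return -rte_errno;
 */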
4795
4796 int
4797 rte_eth_dma_zone_free(const struct rte_eth_dev *dev, const char *ring_name,
4798                 uint16_t queue_id)
4799 {
4800         char z_name[RTE_MEMZONE_NAMESIZE];
4801         const struct rte_memzone *mz;
4802         int rc = 0;
4803
4804         rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
4805                         queue_id, ring_name);
4806         if (rc >= RTE_MEMZONE_NAMESIZE) {
4807                 RTE_ETHDEV_LOG(ERR, "ring name too long\n");
4808                 return -ENAMETOOLONG;
4809         }
4810
4811         mz = rte_memzone_lookup(z_name);
4812         if (mz)
4813                 rc = rte_memzone_free(mz);
4814         else
4815                 rc = -ENOENT;
4816
4817         return rc;
4818 }
4819
4820 int
4821 rte_eth_dev_create(struct rte_device *device, const char *name,
4822         size_t priv_data_size,
4823         ethdev_bus_specific_init ethdev_bus_specific_init,
4824         void *bus_init_params,
4825         ethdev_init_t ethdev_init, void *init_params)
4826 {
4827         struct rte_eth_dev *ethdev;
4828         int retval;
4829
4830         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL);
4831
4832         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
4833                 ethdev = rte_eth_dev_allocate(name);
4834                 if (!ethdev)
4835                         return -ENODEV;
4836
4837                 if (priv_data_size) {
4838                         ethdev->data->dev_private = rte_zmalloc_socket(
4839                                 name, priv_data_size, RTE_CACHE_LINE_SIZE,
4840                                 device->numa_node);
4841
4842                         if (!ethdev->data->dev_private) {
4843                                 RTE_ETHDEV_LOG(ERR,
4844                                         "failed to allocate private data\n");
4845                                 retval = -ENOMEM;
4846                                 goto probe_failed;
4847                         }
4848                 }
4849         } else {
4850                 ethdev = rte_eth_dev_attach_secondary(name);
4851                 if (!ethdev) {
4852                         RTE_ETHDEV_LOG(ERR,
4853                                 "secondary process attach failed, ethdev doesn't exist\n");
                        return -ENODEV;
4855                 }
4856         }
4857
4858         ethdev->device = device;
4859
4860         if (ethdev_bus_specific_init) {
4861                 retval = ethdev_bus_specific_init(ethdev, bus_init_params);
4862                 if (retval) {
4863                         RTE_ETHDEV_LOG(ERR,
4864                                 "ethdev bus specific initialisation failed\n");
4865                         goto probe_failed;
4866                 }
4867         }
4868
4869         retval = ethdev_init(ethdev, init_params);
4870         if (retval) {
4871                 RTE_ETHDEV_LOG(ERR, "ethdev initialisation failed\n");
4872                 goto probe_failed;
4873         }
4874
4875         rte_eth_dev_probing_finish(ethdev);
4876
4877         return retval;
4878
4879 probe_failed:
4880         rte_eth_dev_release_port(ethdev);
4881         return retval;
4882 }
4883
4884 int
4885 rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
4886         ethdev_uninit_t ethdev_uninit)
4887 {
4888         int ret;
4889
4890         ethdev = rte_eth_dev_allocated(ethdev->data->name);
4891         if (!ethdev)
4892                 return -ENODEV;
4893
4894         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL);
4895
4896         ret = ethdev_uninit(ethdev);
4897         if (ret)
4898                 return ret;
4899
4900         return rte_eth_dev_release_port(ethdev);
4901 }
4902
4903 int
4904 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
4905                           int epfd, int op, void *data)
4906 {
4907         uint32_t vec;
4908         struct rte_eth_dev *dev;
4909         struct rte_intr_handle *intr_handle;
4910         int rc;
4911
4912         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4913         dev = &rte_eth_devices[port_id];
4914
4915         if (queue_id >= dev->data->nb_rx_queues) {
4916                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4917                 return -EINVAL;
4918         }
4919
4920         if (!dev->intr_handle) {
4921                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4922                 return -ENOTSUP;
4923         }
4924
4925         intr_handle = dev->intr_handle;
4926         if (!intr_handle->intr_vec) {
4927                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4928                 return -EPERM;
4929         }
4930
4931         vec = intr_handle->intr_vec[queue_id];
4932         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4933         if (rc && rc != -EEXIST) {
4934                 RTE_ETHDEV_LOG(ERR,
4935                         "p %u q %u rx ctl error op %d epfd %d vec %u\n",
4936                         port_id, queue_id, op, epfd, vec);
4937                 return rc;
4938         }
4939
4940         return 0;
4941 }
4942
4943 int
4944 rte_eth_dev_rx_intr_enable(uint16_t port_id,
4945                            uint16_t queue_id)
4946 {
4947         struct rte_eth_dev *dev;
4948         int ret;
4949
4950         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4951         dev = &rte_eth_devices[port_id];
4952
4953         ret = eth_dev_validate_rx_queue(dev, queue_id);
4954         if (ret != 0)
4955                 return ret;
4956
4957         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
4958         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id));
4959 }
4960
4961 int
4962 rte_eth_dev_rx_intr_disable(uint16_t port_id,
4963                             uint16_t queue_id)
4964 {
4965         struct rte_eth_dev *dev;
4966         int ret;
4967
4968         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4969         dev = &rte_eth_devices[port_id];
4970
4971         ret = eth_dev_validate_rx_queue(dev, queue_id);
4972         if (ret != 0)
4973                 return ret;
4974
4975         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
4976         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id));
4977 }
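
/*
 * Usage sketch (illustrative, not part of this file): interrupt-driven Rx on
 * queue 0 of an assumed port_id, in the style of the l3fwd-power example.
 * The queue event is added to the calling thread's epoll set once; later,
 * whenever the polling loop goes idle, it arms the interrupt, sleeps until
 * traffic arrives and then returns to polling.
 *
 *        struct rte_epoll_event ev;
 *
 *        rte_eth_dev_rx_intr_ctl_q(port_id, 0, RTE_EPOLL_PER_THREAD,
 *                        RTE_INTR_EVENT_ADD, NULL);
 *
 *        rte_eth_dev_rx_intr_enable(port_id, 0);
 *        rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
 *        rte_eth_dev_rx_intr_disable(port_id, 0);
 */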
4978
4979
4980 const struct rte_eth_rxtx_callback *
4981 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
4982                 rte_rx_callback_fn fn, void *user_param)
4983 {
4984 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4985         rte_errno = ENOTSUP;
4986         return NULL;
4987 #endif
4988         struct rte_eth_dev *dev;
4989
4990         /* check input parameters */
4991         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4992                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4993                 rte_errno = EINVAL;
4994                 return NULL;
4995         }
4996         dev = &rte_eth_devices[port_id];
4997         if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
4998                 rte_errno = EINVAL;
4999                 return NULL;
5000         }
5001         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
5002
5003         if (cb == NULL) {
5004                 rte_errno = ENOMEM;
5005                 return NULL;
5006         }
5007
5008         cb->fn.rx = fn;
5009         cb->param = user_param;
5010
5011         rte_spinlock_lock(&eth_dev_rx_cb_lock);
        /* Add the callbacks in FIFO order. */
5013         struct rte_eth_rxtx_callback *tail =
5014                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
5015
5016         if (!tail) {
5017                 /* Stores to cb->fn and cb->param should complete before
5018                  * cb is visible to data plane.
5019                  */
5020                 __atomic_store_n(
5021                         &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
5022                         cb, __ATOMIC_RELEASE);
5023
5024         } else {
5025                 while (tail->next)
5026                         tail = tail->next;
5027                 /* Stores to cb->fn and cb->param should complete before
5028                  * cb is visible to data plane.
5029                  */
5030                 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
5031         }
5032         rte_spinlock_unlock(&eth_dev_rx_cb_lock);
5033
5034         return cb;
5035 }
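
/*
 * Usage sketch (illustrative, not part of this file): count packets received
 * on queue 0 of an assumed port_id with a post-Rx callback. As the guard
 * above shows, RTE_ETHDEV_RXTX_CALLBACKS must be enabled in the build.
 *
 *        static uint64_t rx_count;
 *
 *        static uint16_t
 *        count_rx_cb(uint16_t port, uint16_t queue, struct rte_mbuf *pkts[],
 *                        uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
 *        {
 *                uint64_t *counter = user_param;
 *
 *                RTE_SET_USED(port);
 *                RTE_SET_USED(queue);
 *                RTE_SET_USED(pkts);
 *                RTE_SET_USED(max_pkts);
 *                *counter += nb_pkts;
 *                return nb_pkts;
 *        }
 *
 *        const struct rte_eth_rxtx_callback *cb =
 *                rte_eth_add_rx_callback(port_id, 0, count_rx_cb, &rx_count);
 */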
5036
5037 const struct rte_eth_rxtx_callback *
5038 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
5039                 rte_rx_callback_fn fn, void *user_param)
5040 {
5041 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5042         rte_errno = ENOTSUP;
5043         return NULL;
5044 #endif
5045         /* check input parameters */
5046         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
5047                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
5048                 rte_errno = EINVAL;
5049                 return NULL;
5050         }
5051
5052         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
5053
5054         if (cb == NULL) {
5055                 rte_errno = ENOMEM;
5056                 return NULL;
5057         }
5058
5059         cb->fn.rx = fn;
5060         cb->param = user_param;
5061
5062         rte_spinlock_lock(&eth_dev_rx_cb_lock);
5063         /* Add the callbacks at first position */
5064         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
5065         /* Stores to cb->fn, cb->param and cb->next should complete before
5066          * cb is visible to data plane threads.
5067          */
5068         __atomic_store_n(
5069                 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
5070                 cb, __ATOMIC_RELEASE);
5071         rte_spinlock_unlock(&eth_dev_rx_cb_lock);
5072
5073         return cb;
5074 }
5075
5076 const struct rte_eth_rxtx_callback *
5077 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
5078                 rte_tx_callback_fn fn, void *user_param)
5079 {
5080 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5081         rte_errno = ENOTSUP;
5082         return NULL;
5083 #endif
5084         struct rte_eth_dev *dev;
5085
5086         /* check input parameters */
5087         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
5088                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
5089                 rte_errno = EINVAL;
5090                 return NULL;
5091         }
5092
5093         dev = &rte_eth_devices[port_id];
5094         if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
5095                 rte_errno = EINVAL;
5096                 return NULL;
5097         }
5098
5099         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
5100
5101         if (cb == NULL) {
5102                 rte_errno = ENOMEM;
5103                 return NULL;
5104         }
5105
5106         cb->fn.tx = fn;
5107         cb->param = user_param;
5108
5109         rte_spinlock_lock(&eth_dev_tx_cb_lock);
        /* Add the callbacks in FIFO order. */
5111         struct rte_eth_rxtx_callback *tail =
5112                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
5113
5114         if (!tail) {
5115                 /* Stores to cb->fn and cb->param should complete before
5116                  * cb is visible to data plane.
5117                  */
5118                 __atomic_store_n(
5119                         &rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id],
5120                         cb, __ATOMIC_RELEASE);
5121
5122         } else {
5123                 while (tail->next)
5124                         tail = tail->next;
5125                 /* Stores to cb->fn and cb->param should complete before
5126                  * cb is visible to data plane.
5127                  */
5128                 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
5129         }
5130         rte_spinlock_unlock(&eth_dev_tx_cb_lock);
5131
5132         return cb;
5133 }
5134
5135 int
5136 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
5137                 const struct rte_eth_rxtx_callback *user_cb)
5138 {
5139 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5140         return -ENOTSUP;
5141 #endif
5142         /* Check input parameters. */
5143         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5144         if (user_cb == NULL ||
5145                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
5146                 return -EINVAL;
5147
5148         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
5149         struct rte_eth_rxtx_callback *cb;
5150         struct rte_eth_rxtx_callback **prev_cb;
5151         int ret = -EINVAL;
5152
5153         rte_spinlock_lock(&eth_dev_rx_cb_lock);
5154         prev_cb = &dev->post_rx_burst_cbs[queue_id];
5155         for (; *prev_cb != NULL; prev_cb = &cb->next) {
5156                 cb = *prev_cb;
5157                 if (cb == user_cb) {
5158                         /* Remove the user cb from the callback list. */
5159                         __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
5160                         ret = 0;
5161                         break;
5162                 }
5163         }
5164         rte_spinlock_unlock(&eth_dev_rx_cb_lock);
5165
5166         return ret;
5167 }
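
/*
 * Usage sketch (illustrative, not part of this file): removal only unlinks
 * the callback; the memory is not freed here because a data-plane thread may
 * still be executing it. Free it only after all lcores are known to have
 * quiesced, e.g. once rte_eal_mp_wait_lcore() has returned.
 *
 *        if (rte_eth_remove_rx_callback(port_id, 0, cb) == 0) {
 *                rte_eal_mp_wait_lcore();
 *                rte_free((void *)(uintptr_t)cb);
 *        }
 */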
5168
5169 int
5170 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
5171                 const struct rte_eth_rxtx_callback *user_cb)
5172 {
5173 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5174         return -ENOTSUP;
5175 #endif
5176         /* Check input parameters. */
5177         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5178         if (user_cb == NULL ||
5179                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
5180                 return -EINVAL;
5181
5182         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
5183         int ret = -EINVAL;
5184         struct rte_eth_rxtx_callback *cb;
5185         struct rte_eth_rxtx_callback **prev_cb;
5186
5187         rte_spinlock_lock(&eth_dev_tx_cb_lock);
5188         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
5189         for (; *prev_cb != NULL; prev_cb = &cb->next) {
5190                 cb = *prev_cb;
5191                 if (cb == user_cb) {
5192                         /* Remove the user cb from the callback list. */
5193                         __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
5194                         ret = 0;
5195                         break;
5196                 }
5197         }
5198         rte_spinlock_unlock(&eth_dev_tx_cb_lock);
5199
5200         return ret;
5201 }
5202
5203 int
5204 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5205         struct rte_eth_rxq_info *qinfo)
5206 {
5207         struct rte_eth_dev *dev;
5208
5209         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5210         dev = &rte_eth_devices[port_id];
5211
5212         if (queue_id >= dev->data->nb_rx_queues) {
5213                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
5214                 return -EINVAL;
5215         }
5216
5217         if (qinfo == NULL) {
5218                 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Rx queue %u info to NULL\n",
5219                         port_id, queue_id);
5220                 return -EINVAL;
5221         }
5222
5223         if (dev->data->rx_queues == NULL ||
5224                         dev->data->rx_queues[queue_id] == NULL) {
5225                 RTE_ETHDEV_LOG(ERR,
5226                                "Rx queue %"PRIu16" of device with port_id=%"
5227                                PRIu16" has not been setup\n",
5228                                queue_id, port_id);
5229                 return -EINVAL;
5230         }
5231
5232         if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
5233                 RTE_ETHDEV_LOG(INFO,
5234                         "Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
5235                         queue_id, port_id);
5236                 return -EINVAL;
5237         }
5238
5239         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
5240
5241         memset(qinfo, 0, sizeof(*qinfo));
5242         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
5243         qinfo->queue_state = dev->data->rx_queue_state[queue_id];
5244
5245         return 0;
5246 }
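
/*
 * Usage sketch (illustrative, not part of this file): inspect the runtime
 * configuration of Rx queue 0 on an assumed port_id.
 *
 *        struct rte_eth_rxq_info qinfo;
 *
 *        if (rte_eth_rx_queue_info_get(port_id, 0, &qinfo) == 0)
 *                printf("rxq 0: %u descriptors, mempool %s\n",
 *                        qinfo.nb_desc, qinfo.mp->name);
 */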
5247
5248 int
5249 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5250         struct rte_eth_txq_info *qinfo)
5251 {
5252         struct rte_eth_dev *dev;
5253
5254         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5255         dev = &rte_eth_devices[port_id];
5256
5257         if (queue_id >= dev->data->nb_tx_queues) {
5258                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
5259                 return -EINVAL;
5260         }
5261
5262         if (qinfo == NULL) {
5263                 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Tx queue %u info to NULL\n",
5264                         port_id, queue_id);
5265                 return -EINVAL;
5266         }
5267
5268         if (dev->data->tx_queues == NULL ||
5269                         dev->data->tx_queues[queue_id] == NULL) {
5270                 RTE_ETHDEV_LOG(ERR,
5271                                "Tx queue %"PRIu16" of device with port_id=%"
5272                                PRIu16" has not been setup\n",
5273                                queue_id, port_id);
5274                 return -EINVAL;
5275         }
5276
5277         if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
5278                 RTE_ETHDEV_LOG(INFO,
5279                         "Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
5280                         queue_id, port_id);
5281                 return -EINVAL;
5282         }
5283
5284         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
5285
5286         memset(qinfo, 0, sizeof(*qinfo));
5287         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
5288         qinfo->queue_state = dev->data->tx_queue_state[queue_id];
5289
5290         return 0;
5291 }
5292
5293 int
5294 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5295                           struct rte_eth_burst_mode *mode)
5296 {
5297         struct rte_eth_dev *dev;
5298
5299         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5300         dev = &rte_eth_devices[port_id];
5301
5302         if (queue_id >= dev->data->nb_rx_queues) {
5303                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
5304                 return -EINVAL;
5305         }
5306
5307         if (mode == NULL) {
5308                 RTE_ETHDEV_LOG(ERR,
5309                         "Cannot get ethdev port %u Rx queue %u burst mode to NULL\n",
5310                         port_id, queue_id);
5311                 return -EINVAL;
5312         }
5313
5314         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP);
5315         memset(mode, 0, sizeof(*mode));
5316         return eth_err(port_id,
5317                        dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode));
5318 }
5319
5320 int
5321 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5322                           struct rte_eth_burst_mode *mode)
5323 {
5324         struct rte_eth_dev *dev;
5325
5326         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5327         dev = &rte_eth_devices[port_id];
5328
5329         if (queue_id >= dev->data->nb_tx_queues) {
5330                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
5331                 return -EINVAL;
5332         }
5333
5334         if (mode == NULL) {
5335                 RTE_ETHDEV_LOG(ERR,
5336                         "Cannot get ethdev port %u Tx queue %u burst mode to NULL\n",
5337                         port_id, queue_id);
5338                 return -EINVAL;
5339         }
5340
5341         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP);
5342         memset(mode, 0, sizeof(*mode));
5343         return eth_err(port_id,
5344                        dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode));
5345 }
5346
5347 int
5348 rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id,
5349                 struct rte_power_monitor_cond *pmc)
5350 {
5351         struct rte_eth_dev *dev;
5352
5353         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5354         dev = &rte_eth_devices[port_id];
5355
5356         if (queue_id >= dev->data->nb_rx_queues) {
5357                 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
5358                 return -EINVAL;
5359         }
5360
5361         if (pmc == NULL) {
5362                 RTE_ETHDEV_LOG(ERR,
5363                         "Cannot get ethdev port %u Rx queue %u power monitor condition to NULL\n",
5364                         port_id, queue_id);
5365                 return -EINVAL;
5366         }
5367
5368         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_monitor_addr, -ENOTSUP);
5369         return eth_err(port_id,
5370                 dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id], pmc));
5371 }
5372
5373 int
5374 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
5375                              struct rte_ether_addr *mc_addr_set,
5376                              uint32_t nb_mc_addr)
5377 {
5378         struct rte_eth_dev *dev;
5379
5380         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5381         dev = &rte_eth_devices[port_id];
5382
5383         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
5384         return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
5385                                                 mc_addr_set, nb_mc_addr));
5386 }
5387
5388 int
5389 rte_eth_timesync_enable(uint16_t port_id)
5390 {
5391         struct rte_eth_dev *dev;
5392
5393         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5394         dev = &rte_eth_devices[port_id];
5395
5396         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
5397         return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
5398 }
5399
5400 int
5401 rte_eth_timesync_disable(uint16_t port_id)
5402 {
5403         struct rte_eth_dev *dev;
5404
5405         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5406         dev = &rte_eth_devices[port_id];
5407
5408         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
5409         return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
5410 }
5411
5412 int
5413 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
5414                                    uint32_t flags)
5415 {
5416         struct rte_eth_dev *dev;
5417
5418         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5419         dev = &rte_eth_devices[port_id];
5420
5421         if (timestamp == NULL) {
5422                 RTE_ETHDEV_LOG(ERR,
5423                         "Cannot read ethdev port %u Rx timestamp to NULL\n",
5424                         port_id);
5425                 return -EINVAL;
5426         }
5427
5428         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
5429         return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
5430                                 (dev, timestamp, flags));
5431 }
5432
5433 int
5434 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
5435                                    struct timespec *timestamp)
5436 {
5437         struct rte_eth_dev *dev;
5438
5439         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5440         dev = &rte_eth_devices[port_id];
5441
5442         if (timestamp == NULL) {
5443                 RTE_ETHDEV_LOG(ERR,
5444                         "Cannot read ethdev port %u Tx timestamp to NULL\n",
5445                         port_id);
5446                 return -EINVAL;
5447         }
5448
5449         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
5450         return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
5451                                 (dev, timestamp));
5452 }
5453
5454 int
5455 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
5456 {
5457         struct rte_eth_dev *dev;
5458
5459         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5460         dev = &rte_eth_devices[port_id];
5461
5462         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
5463         return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, delta));
5464 }
5465
5466 int
5467 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
5468 {
5469         struct rte_eth_dev *dev;
5470
5471         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5472         dev = &rte_eth_devices[port_id];
5473
5474         if (timestamp == NULL) {
5475                 RTE_ETHDEV_LOG(ERR,
5476                         "Cannot read ethdev port %u timesync time to NULL\n",
5477                         port_id);
5478                 return -EINVAL;
5479         }
5480
5481         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
5482         return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
5483                                                                 timestamp));
5484 }
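
/*
 * Usage sketch (illustrative, not part of this file): read the device (PHC)
 * clock of an assumed port_id after enabling timestamping.
 *
 *        struct timespec ts;
 *
 *        rte_eth_timesync_enable(port_id);
 *        if (rte_eth_timesync_read_time(port_id, &ts) == 0)
 *                printf("device time: %jd.%09ld\n",
 *                        (intmax_t)ts.tv_sec, ts.tv_nsec);
 */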
5485
5486 int
5487 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
5488 {
5489         struct rte_eth_dev *dev;
5490
5491         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5492         dev = &rte_eth_devices[port_id];
5493
5494         if (timestamp == NULL) {
5495                 RTE_ETHDEV_LOG(ERR,
5496                         "Cannot write ethdev port %u timesync from NULL time\n",
5497                         port_id);
5498                 return -EINVAL;
5499         }
5500
5501         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
5502         return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
5503                                                                 timestamp));
5504 }
5505
5506 int
5507 rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
5508 {
5509         struct rte_eth_dev *dev;
5510
5511         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5512         dev = &rte_eth_devices[port_id];
5513
5514         if (clock == NULL) {
5515                 RTE_ETHDEV_LOG(ERR, "Cannot read ethdev port %u clock to NULL\n",
5516                         port_id);
5517                 return -EINVAL;
5518         }
5519
5520         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP);
5521         return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock));
5522 }
5523
5524 int
5525 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
5526 {
5527         struct rte_eth_dev *dev;
5528
5529         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5530         dev = &rte_eth_devices[port_id];
5531
5532         if (info == NULL) {
5533                 RTE_ETHDEV_LOG(ERR,
5534                         "Cannot get ethdev port %u register info to NULL\n",
5535                         port_id);
5536                 return -EINVAL;
5537         }
5538
5539         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
5540         return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
5541 }
5542
5543 int
5544 rte_eth_dev_get_eeprom_length(uint16_t port_id)
5545 {
5546         struct rte_eth_dev *dev;
5547
5548         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5549         dev = &rte_eth_devices[port_id];
5550
5551         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
5552         return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
5553 }
5554
5555 int
5556 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
5557 {
5558         struct rte_eth_dev *dev;
5559
5560         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5561         dev = &rte_eth_devices[port_id];
5562
5563         if (info == NULL) {
5564                 RTE_ETHDEV_LOG(ERR,
5565                         "Cannot get ethdev port %u EEPROM info to NULL\n",
5566                         port_id);
5567                 return -EINVAL;
5568         }
5569
5570         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
5571         return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
5572 }
5573
5574 int
5575 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
5576 {
5577         struct rte_eth_dev *dev;
5578
5579         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5580         dev = &rte_eth_devices[port_id];
5581
5582         if (info == NULL) {
5583                 RTE_ETHDEV_LOG(ERR,
5584                         "Cannot set ethdev port %u EEPROM from NULL info\n",
5585                         port_id);
5586                 return -EINVAL;
5587         }
5588
5589         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
5590         return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
5591 }
5592
5593 int
5594 rte_eth_dev_get_module_info(uint16_t port_id,
5595                             struct rte_eth_dev_module_info *modinfo)
5596 {
5597         struct rte_eth_dev *dev;
5598
5599         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5600         dev = &rte_eth_devices[port_id];
5601
5602         if (modinfo == NULL) {
5603                 RTE_ETHDEV_LOG(ERR,
5604                         "Cannot get ethdev port %u EEPROM module info to NULL\n",
5605                         port_id);
5606                 return -EINVAL;
5607         }
5608
5609         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP);
5610         return (*dev->dev_ops->get_module_info)(dev, modinfo);
5611 }
5612
5613 int
5614 rte_eth_dev_get_module_eeprom(uint16_t port_id,
5615                               struct rte_dev_eeprom_info *info)
5616 {
5617         struct rte_eth_dev *dev;
5618
5619         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5620         dev = &rte_eth_devices[port_id];
5621
5622         if (info == NULL) {
5623                 RTE_ETHDEV_LOG(ERR,
5624                         "Cannot get ethdev port %u module EEPROM info to NULL\n",
5625                         port_id);
5626                 return -EINVAL;
5627         }
5628
5629         if (info->data == NULL) {
5630                 RTE_ETHDEV_LOG(ERR,
5631                         "Cannot get ethdev port %u module EEPROM data to NULL\n",
5632                         port_id);
5633                 return -EINVAL;
5634         }
5635
5636         if (info->length == 0) {
5637                 RTE_ETHDEV_LOG(ERR,
5638                         "Cannot get ethdev port %u module EEPROM to data with zero size\n",
5639                         port_id);
5640                 return -EINVAL;
5641         }
5642
5643         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP);
5644         return (*dev->dev_ops->get_module_eeprom)(dev, info);
5645 }
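
/*
 * Usage sketch (illustrative, not part of this file): dump the beginning of a
 * plugged transceiver's EEPROM on an assumed port_id. The module info call
 * reports the EEPROM layout type and total length.
 *
 *        struct rte_eth_dev_module_info minfo;
 *        struct rte_dev_eeprom_info einfo = { 0 };
 *        uint8_t buf[256];
 *
 *        if (rte_eth_dev_get_module_info(port_id, &minfo) == 0) {
 *                einfo.offset = 0;
 *                einfo.length = RTE_MIN(minfo.eeprom_len,
 *                                (uint32_t)sizeof(buf));
 *                einfo.data = buf;
 *                if (rte_eth_dev_get_module_eeprom(port_id, &einfo) == 0)
 *                        printf("module EEPROM type %u, %u bytes read\n",
 *                                minfo.type, einfo.length);
 *        }
 */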
5646
5647 int
5648 rte_eth_dev_get_dcb_info(uint16_t port_id,
5649                              struct rte_eth_dcb_info *dcb_info)
5650 {
5651         struct rte_eth_dev *dev;
5652
5653         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5654         dev = &rte_eth_devices[port_id];
5655
5656         if (dcb_info == NULL) {
5657                 RTE_ETHDEV_LOG(ERR,
5658                         "Cannot get ethdev port %u DCB info to NULL\n",
5659                         port_id);
5660                 return -EINVAL;
5661         }
5662
5663         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
5664
5665         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
5666         return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
5667 }
5668
5669 static void
5670 eth_dev_adjust_nb_desc(uint16_t *nb_desc,
5671                 const struct rte_eth_desc_lim *desc_lim)
5672 {
5673         if (desc_lim->nb_align != 0)
5674                 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
5675
5676         if (desc_lim->nb_max != 0)
5677                 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
5678
5679         *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
5680 }
5681
5682 int
5683 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
5684                                  uint16_t *nb_rx_desc,
5685                                  uint16_t *nb_tx_desc)
5686 {
5687         struct rte_eth_dev_info dev_info;
5688         int ret;
5689
5690         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5691
5692         ret = rte_eth_dev_info_get(port_id, &dev_info);
5693         if (ret != 0)
5694                 return ret;
5695
5696         if (nb_rx_desc != NULL)
5697                 eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
5698
5699         if (nb_tx_desc != NULL)
5700                 eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
5701
5702         return 0;
5703 }
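
/*
 * Usage sketch (illustrative, not part of this file): clamp an application's
 * preferred descriptor counts to the device limits before queue setup;
 * port_id, socket_id and mb_pool are assumed to exist.
 *
 *        uint16_t nb_rxd = 4096, nb_txd = 4096;
 *
 *        rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
 *        rte_eth_rx_queue_setup(port_id, 0, nb_rxd, socket_id, NULL, mb_pool);
 *        rte_eth_tx_queue_setup(port_id, 0, nb_txd, socket_id, NULL);
 */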
5704
5705 int
5706 rte_eth_dev_hairpin_capability_get(uint16_t port_id,
5707                                    struct rte_eth_hairpin_cap *cap)
5708 {
5709         struct rte_eth_dev *dev;
5710
5711         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5712         dev = &rte_eth_devices[port_id];
5713
5714         if (cap == NULL) {
5715                 RTE_ETHDEV_LOG(ERR,
5716                         "Cannot get ethdev port %u hairpin capability to NULL\n",
5717                         port_id);
5718                 return -EINVAL;
5719         }
5720
5721         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP);
5722         memset(cap, 0, sizeof(*cap));
5723         return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap));
5724 }
5725
5726 int
5727 rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
5728 {
5729         if (dev->data->rx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN)
5730                 return 1;
5731         return 0;
5732 }
5733
5734 int
5735 rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
5736 {
5737         if (dev->data->tx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN)
5738                 return 1;
5739         return 0;
5740 }
5741
5742 int
5743 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
5744 {
5745         struct rte_eth_dev *dev;
5746
5747         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5748         dev = &rte_eth_devices[port_id];
5749
5750         if (pool == NULL) {
5751                 RTE_ETHDEV_LOG(ERR,
5752                         "Cannot test ethdev port %u mempool operation from NULL pool\n",
5753                         port_id);
5754                 return -EINVAL;
5755         }
5756
5757         if (*dev->dev_ops->pool_ops_supported == NULL)
5758                 return 1; /* all pools are supported */
5759
5760         return (*dev->dev_ops->pool_ops_supported)(dev, pool);
5761 }
5762
5763 /**
5764  * A set of values to describe the possible states of a switch domain.
5765  */
5766 enum rte_eth_switch_domain_state {
5767         RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
5768         RTE_ETH_SWITCH_DOMAIN_ALLOCATED
5769 };
5770
5771 /**
5772  * Array of switch domains available for allocation. Array is sized to
5773  * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
5774  * ethdev ports in a single process.
5775  */
5776 static struct rte_eth_dev_switch {
5777         enum rte_eth_switch_domain_state state;
5778 } eth_dev_switch_domains[RTE_MAX_ETHPORTS];
5779
5780 int
5781 rte_eth_switch_domain_alloc(uint16_t *domain_id)
5782 {
5783         uint16_t i;
5784
5785         *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
5786
5787         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
5788                 if (eth_dev_switch_domains[i].state ==
5789                         RTE_ETH_SWITCH_DOMAIN_UNUSED) {
5790                         eth_dev_switch_domains[i].state =
5791                                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
5792                         *domain_id = i;
5793                         return 0;
5794                 }
5795         }
5796
5797         return -ENOSPC;
5798 }
5799
5800 int
5801 rte_eth_switch_domain_free(uint16_t domain_id)
5802 {
5803         if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
5804                 domain_id >= RTE_MAX_ETHPORTS)
5805                 return -EINVAL;
5806
5807         if (eth_dev_switch_domains[domain_id].state !=
5808                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
5809                 return -EINVAL;
5810
5811         eth_dev_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;
5812
5813         return 0;
5814 }
5815
5816 static int
5817 eth_dev_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
5818 {
5819         int state;
5820         struct rte_kvargs_pair *pair;
5821         char *letter;
5822
5823         arglist->str = strdup(str_in);
5824         if (arglist->str == NULL)
5825                 return -ENOMEM;
5826
5827         letter = arglist->str;
5828         state = 0;
5829         arglist->count = 0;
5830         pair = &arglist->pairs[0];
5831         while (1) {
5832                 switch (state) {
5833                 case 0: /* Initial */
5834                         if (*letter == '=')
5835                                 return -EINVAL;
5836                         else if (*letter == '\0')
5837                                 return 0;
5838
5839                         state = 1;
5840                         pair->key = letter;
5841                         /* fall-thru */
5842
5843                 case 1: /* Parsing key */
5844                         if (*letter == '=') {
5845                                 *letter = '\0';
5846                                 pair->value = letter + 1;
5847                                 state = 2;
5848                         } else if (*letter == ',' || *letter == '\0')
5849                                 return -EINVAL;
5850                         break;
5851
5852
5853                 case 2: /* Parsing value */
5854                         if (*letter == '[')
5855                                 state = 3;
5856                         else if (*letter == ',') {
5857                                 *letter = '\0';
5858                                 arglist->count++;
5859                                 pair = &arglist->pairs[arglist->count];
5860                                 state = 0;
5861                         } else if (*letter == '\0') {
5862                                 letter--;
5863                                 arglist->count++;
5864                                 pair = &arglist->pairs[arglist->count];
5865                                 state = 0;
5866                         }
5867                         break;
5868
5869                 case 3: /* Parsing list */
5870                         if (*letter == ']')
5871                                 state = 2;
5872                         else if (*letter == '\0')
5873                                 return -EINVAL;
5874                         break;
5875                 }
5876                 letter++;
5877         }
5878 }
5879
5880 int
5881 rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
5882 {
5883         struct rte_kvargs args;
5884         struct rte_kvargs_pair *pair;
5885         unsigned int i;
5886         int result = 0;
5887
5888         memset(eth_da, 0, sizeof(*eth_da));
5889
5890         result = eth_dev_devargs_tokenise(&args, dargs);
5891         if (result < 0)
5892                 goto parse_cleanup;
5893
5894         for (i = 0; i < args.count; i++) {
5895                 pair = &args.pairs[i];
5896                 if (strcmp("representor", pair->key) == 0) {
5897                         if (eth_da->type != RTE_ETH_REPRESENTOR_NONE) {
5898                                 RTE_LOG(ERR, EAL, "duplicated representor key: %s\n",
5899                                         dargs);
5900                                 result = -1;
5901                                 goto parse_cleanup;
5902                         }
5903                         result = rte_eth_devargs_parse_representor_ports(
5904                                         pair->value, eth_da);
5905                         if (result < 0)
5906                                 goto parse_cleanup;
5907                 }
5908         }
5909
5910 parse_cleanup:
5911         if (args.str)
5912                 free(args.str);
5913
5914         return result;
5915 }
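
/*
 * Usage sketch (illustrative, not part of this file): a PMD parsing its
 * device arguments for representor ranges, e.g. "representor=[0-3]".
 *
 *        struct rte_eth_devargs da;
 *
 *        if (rte_eth_devargs_parse("representor=[0-3]", &da) == 0)
 *                printf("%u representor ports requested\n",
 *                        da.nb_representor_ports);
 */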
5916
int
rte_eth_representor_id_get(uint16_t port_id,
			   enum rte_eth_representor_type type,
			   int controller, int pf, int representor_port,
			   uint16_t *repr_id)
{
	int ret, n, count;
	uint32_t i;
	struct rte_eth_representor_info *info = NULL;
	size_t size;

	if (type == RTE_ETH_REPRESENTOR_NONE)
		return 0;
	if (repr_id == NULL)
		return -EINVAL;

	/* Get PMD representor range info. */
	ret = rte_eth_representor_info_get(port_id, NULL);
	if (ret == -ENOTSUP && type == RTE_ETH_REPRESENTOR_VF &&
	    controller == -1 && pf == -1) {
		/* Direct mapping for legacy VF representor. */
		*repr_id = representor_port;
		return 0;
	} else if (ret < 0) {
		return ret;
	}
	n = ret;
	size = sizeof(*info) + n * sizeof(info->ranges[0]);
	info = calloc(1, size);
	if (info == NULL)
		return -ENOMEM;
	info->nb_ranges_alloc = n;
	ret = rte_eth_representor_info_get(port_id, info);
	if (ret < 0)
		goto out;

	/* Default controller and pf to caller. */
	if (controller == -1)
		controller = info->controller;
	if (pf == -1)
		pf = info->pf;

	/* Locate representor ID. */
	ret = -ENOENT;
	for (i = 0; i < info->nb_ranges; ++i) {
		if (info->ranges[i].type != type)
			continue;
		if (info->ranges[i].controller != controller)
			continue;
		if (info->ranges[i].id_end < info->ranges[i].id_base) {
			RTE_ETHDEV_LOG(WARNING,
				"Port %hu invalid representor ID range %u - %u, entry %u\n",
				port_id, info->ranges[i].id_base,
				info->ranges[i].id_end, i);
			continue;
		}
		count = info->ranges[i].id_end - info->ranges[i].id_base + 1;
		switch (info->ranges[i].type) {
		case RTE_ETH_REPRESENTOR_PF:
			if (pf < info->ranges[i].pf ||
			    pf >= info->ranges[i].pf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
				   (pf - info->ranges[i].pf);
			ret = 0;
			goto out;
		case RTE_ETH_REPRESENTOR_VF:
			if (info->ranges[i].pf != pf)
				continue;
			if (representor_port < info->ranges[i].vf ||
			    representor_port >= info->ranges[i].vf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
				   (representor_port - info->ranges[i].vf);
			ret = 0;
			goto out;
		case RTE_ETH_REPRESENTOR_SF:
			if (info->ranges[i].pf != pf)
				continue;
			if (representor_port < info->ranges[i].sf ||
			    representor_port >= info->ranges[i].sf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
				   (representor_port - info->ranges[i].sf);
			ret = 0;
			goto out;
		default:
			break;
		}
	}
out:
	free(info);
	return ret;
}

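/* Telemetry handler for "/ethdev/list": array of valid port IDs. */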
static int
eth_dev_handle_port_list(const char *cmd __rte_unused,
		const char *params __rte_unused,
		struct rte_tel_data *d)
{
	int port_id;

	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
	RTE_ETH_FOREACH_DEV(port_id)
		rte_tel_data_add_array_int(d, port_id);
	return 0;
}

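/*
 * Append one per-queue counter array (e.g. q_ipackets) to the stats
 * dictionary as a nested container of RTE_ETHDEV_QUEUE_STAT_CNTRS
 * u64 values. An allocation failure is silently ignored and simply
 * leaves the entry out of the dictionary.
 */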
static void
eth_dev_add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats,
		const char *stat_name)
{
	int q;
	struct rte_tel_data *q_data = rte_tel_data_alloc();

	if (q_data == NULL)
		return;
	rte_tel_data_start_array(q_data, RTE_TEL_U64_VAL);
	for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++)
		rte_tel_data_add_array_u64(q_data, q_stats[q]);
	rte_tel_data_add_dict_container(d, stat_name, q_data, 0);
}

#define ADD_DICT_STAT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s)

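/*
 * Telemetry handler for "/ethdev/stats": emits the basic statistics
 * of one port as a dictionary. ADD_DICT_STAT above keys each entry
 * by the stringified field name of struct rte_eth_stats.
 */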
static int
eth_dev_handle_port_stats(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	struct rte_eth_stats stats;
	int port_id, ret;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	port_id = atoi(params);
	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	ret = rte_eth_stats_get(port_id, &stats);
	if (ret < 0)
		return -1;

	rte_tel_data_start_dict(d);
	ADD_DICT_STAT(stats, ipackets);
	ADD_DICT_STAT(stats, opackets);
	ADD_DICT_STAT(stats, ibytes);
	ADD_DICT_STAT(stats, obytes);
	ADD_DICT_STAT(stats, imissed);
	ADD_DICT_STAT(stats, ierrors);
	ADD_DICT_STAT(stats, oerrors);
	ADD_DICT_STAT(stats, rx_nombuf);
	eth_dev_add_port_queue_stats(d, stats.q_ipackets, "q_ipackets");
	eth_dev_add_port_queue_stats(d, stats.q_opackets, "q_opackets");
	eth_dev_add_port_queue_stats(d, stats.q_ibytes, "q_ibytes");
	eth_dev_add_port_queue_stats(d, stats.q_obytes, "q_obytes");
	eth_dev_add_port_queue_stats(d, stats.q_errors, "q_errors");

	return 0;
}

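/*
 * Telemetry handler for "/ethdev/xstats": emits the extended
 * statistics of one port as a name -> value dictionary. Names and
 * values share a single allocation, sized from a first counting call
 * to rte_eth_xstats_get().
 */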
static int
eth_dev_handle_port_xstats(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	struct rte_eth_xstat *eth_xstats;
	struct rte_eth_xstat_name *xstat_names;
	int port_id, num_xstats;
	int i, ret;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	port_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		RTE_ETHDEV_LOG(NOTICE,
			"Extra parameters passed to ethdev telemetry command, ignoring\n");
	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	num_xstats = rte_eth_xstats_get(port_id, NULL, 0);
	if (num_xstats < 0)
		return -1;

	/* use one malloc for both names and stats */
	eth_xstats = malloc((sizeof(struct rte_eth_xstat) +
			sizeof(struct rte_eth_xstat_name)) * num_xstats);
	if (eth_xstats == NULL)
		return -1;
	xstat_names = (void *)&eth_xstats[num_xstats];

	ret = rte_eth_xstats_get_names(port_id, xstat_names, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(eth_xstats);
		return -1;
	}

	ret = rte_eth_xstats_get(port_id, eth_xstats, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(eth_xstats);
		return -1;
	}

	rte_tel_data_start_dict(d);
	for (i = 0; i < num_xstats; i++)
		rte_tel_data_add_dict_u64(d, xstat_names[i].name,
				eth_xstats[i].value);
	free(eth_xstats);
	return 0;
}

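/*
 * Telemetry handler for "/ethdev/link_status": reports "DOWN", or
 * "UP" plus speed and duplex, using the non-blocking link query.
 */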
static int
eth_dev_handle_port_link_status(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	static const char *status_str = "status";
	int ret, port_id;
	struct rte_eth_link link;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	port_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		RTE_ETHDEV_LOG(NOTICE,
			"Extra parameters passed to ethdev telemetry command, ignoring\n");
	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	ret = rte_eth_link_get_nowait(port_id, &link);
	if (ret < 0)
		return -1;

	rte_tel_data_start_dict(d);
	if (!link.link_status) {
		rte_tel_data_add_dict_string(d, status_str, "DOWN");
		return 0;
	}
	rte_tel_data_add_dict_string(d, status_str, "UP");
	rte_tel_data_add_dict_u64(d, "speed", link.link_speed);
	rte_tel_data_add_dict_string(d, "duplex",
			(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
				"full-duplex" : "half-duplex");
	return 0;
}

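/*
 * PMD-facing helper: forward the local queue information of one
 * hairpin end to the peer port's PMD so it can update its queue
 * configuration. The peer port validity is assumed to have been
 * checked by the calling driver, hence no recheck here.
 */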
int
rte_eth_hairpin_queue_peer_update(uint16_t peer_port, uint16_t peer_queue,
				  struct rte_hairpin_peer_info *cur_info,
				  struct rte_hairpin_peer_info *peer_info,
				  uint32_t direction)
{
	struct rte_eth_dev *dev;

	/* Current queue information is not mandatory, peer info is. */
	if (peer_info == NULL)
		return -EINVAL;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[peer_port];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_update,
				-ENOTSUP);

	return (*dev->dev_ops->hairpin_queue_peer_update)(dev, peer_queue,
					cur_info, peer_info, direction);
}

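/*
 * PMD-facing helper: ask the PMD of cur_port to bind cur_queue to the
 * hairpin peer described by peer_info. The direction argument selects
 * whether the current queue acts as the Tx or the Rx end.
 */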
int
rte_eth_hairpin_queue_peer_bind(uint16_t cur_port, uint16_t cur_queue,
				struct rte_hairpin_peer_info *peer_info,
				uint32_t direction)
{
	struct rte_eth_dev *dev;

	if (peer_info == NULL)
		return -EINVAL;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[cur_port];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_bind,
				-ENOTSUP);

	return (*dev->dev_ops->hairpin_queue_peer_bind)(dev, cur_queue,
							peer_info, direction);
}

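/*
 * PMD-facing helper: release the hairpin peer binding of cur_queue on
 * cur_port in the given direction.
 */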
int
rte_eth_hairpin_queue_peer_unbind(uint16_t cur_port, uint16_t cur_queue,
				  uint32_t direction)
{
	struct rte_eth_dev *dev;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[cur_port];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_unbind,
				-ENOTSUP);

	return (*dev->dev_ops->hairpin_queue_peer_unbind)(dev, cur_queue,
							  direction);
}

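/*
 * Fetch representor range information from the PMD. When info is
 * NULL, only the number of ranges is returned, letting callers size
 * a buffer for a second call, as rte_eth_representor_id_get() above
 * does.
 */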
int
rte_eth_representor_info_get(uint16_t port_id,
			     struct rte_eth_representor_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->representor_info_get, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev, info));
}

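/*
 * Negotiate which kinds of Rx metadata the NIC should deliver to the
 * PMD. Must be called before the first configuration of the port;
 * the PMD clears the bits of *features it cannot support. An
 * illustrative call, with the flags defined in rte_ethdev.h:
 *
 *	uint64_t features = RTE_ETH_RX_METADATA_USER_FLAG |
 *			    RTE_ETH_RX_METADATA_USER_MARK;
 *	if (rte_eth_rx_metadata_negotiate(port_id, &features) == 0 &&
 *	    (features & RTE_ETH_RX_METADATA_USER_MARK) != 0)
 *		mark_delivery_supported = true;
 */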
int
rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_configured != 0) {
		RTE_ETHDEV_LOG(ERR,
			"The port (id=%"PRIu16") is already configured\n",
			port_id);
		return -EBUSY;
	}

	if (features == NULL) {
		RTE_ETHDEV_LOG(ERR, "Invalid features (NULL)\n");
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_metadata_negotiate, -ENOTSUP);
	return eth_err(port_id,
		       (*dev->dev_ops->rx_metadata_negotiate)(dev, features));
}

RTE_LOG_REGISTER_DEFAULT(rte_eth_dev_logtype, INFO);

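/*
 * Constructor: register the "/ethdev/*" telemetry commands with the
 * handlers defined above.
 */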
RTE_INIT(ethdev_init_telemetry)
{
	rte_telemetry_register_cmd("/ethdev/list", eth_dev_handle_port_list,
			"Returns list of available ethdev ports. Takes no parameters");
	rte_telemetry_register_cmd("/ethdev/stats", eth_dev_handle_port_stats,
			"Returns the common stats for a port. Parameters: int port_id");
	rte_telemetry_register_cmd("/ethdev/xstats", eth_dev_handle_port_xstats,
			"Returns the extended stats for a port. Parameters: int port_id");
	rte_telemetry_register_cmd("/ethdev/link_status",
			eth_dev_handle_port_link_status,
			"Returns the link status for a port. Parameters: int port_id");
}