ethdev: improve xstats names by IDs get prototype
lib/ethdev/rte_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <ctype.h>
#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_kvargs.h>
#include <rte_class.h>
#include <rte_ether.h>
#include <rte_telemetry.h>

#include "rte_ethdev_trace.h"
#include "rte_ethdev.h"
#include "ethdev_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* spinlock for eth device callbacks */
static rte_spinlock_t eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove rx callbacks */
static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove tx callbacks */
static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for shared data allocation */
static rte_spinlock_t eth_dev_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* Store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

/* Shared memory between primary and secondary processes. */
static struct {
        uint64_t next_owner_id;
        rte_spinlock_t ownership_lock;
        struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
} *eth_dev_shared_data;

static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = {
        {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
        {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
        {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
        {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
        {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
        {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
        {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
        {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
                rx_nombuf)},
};

#define RTE_NB_STATS RTE_DIM(eth_dev_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_rxq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
        {"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS RTE_DIM(eth_dev_rxq_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_opackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)
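
/*
 * Illustrative sketch (not part of the upstream file): these name/offset
 * tables are meant to be consumed by reading each counter straight out of
 * a struct rte_eth_stats through its recorded offset, along these lines:
 *
 *	struct rte_eth_stats stats;
 *	unsigned int i;
 *
 *	rte_eth_stats_get(port_id, &stats);
 *	for (i = 0; i < RTE_NB_STATS; i++) {
 *		uint64_t val = *(uint64_t *)(((char *)&stats) +
 *				eth_dev_stats_strings[i].offset);
 *		printf("%s: %" PRIu64 "\n",
 *				eth_dev_stats_strings[i].name, val);
 *	}
 */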

#define RTE_RX_OFFLOAD_BIT2STR(_name)   \
        { DEV_RX_OFFLOAD_##_name, #_name }

#define RTE_ETH_RX_OFFLOAD_BIT2STR(_name)       \
        { RTE_ETH_RX_OFFLOAD_##_name, #_name }

static const struct {
        uint64_t offload;
        const char *name;
} eth_dev_rx_offload_names[] = {
        RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
        RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
        RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
        RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
        RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
        RTE_RX_OFFLOAD_BIT2STR(SCATTER),
        RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
        RTE_RX_OFFLOAD_BIT2STR(SECURITY),
        RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
        RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
        RTE_ETH_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
};

#undef RTE_RX_OFFLOAD_BIT2STR
#undef RTE_ETH_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name)   \
        { DEV_TX_OFFLOAD_##_name, #_name }

static const struct {
        uint64_t offload;
        const char *name;
} eth_dev_tx_offload_names[] = {
        RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
        RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
        RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
        RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
        RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
        RTE_TX_OFFLOAD_BIT2STR(SECURITY),
        RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
};

#undef RTE_TX_OFFLOAD_BIT2STR

/**
 * The user application callback description.
 *
 * It contains the callback address to be registered by the user
 * application, a pointer to the callback's parameter, and the event type.
 */
struct rte_eth_dev_callback {
        TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
        rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
        void *cb_arg;                           /**< Parameter for callback */
        void *ret_param;                        /**< Return parameter */
        enum rte_eth_event_type event;          /**< Interrupt event type */
        uint32_t active;                        /**< Callback is executing */
};

enum {
        STAT_QMAP_TX = 0,
        STAT_QMAP_RX
};

int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
        int ret;
        struct rte_devargs devargs;
        const char *bus_param_key;
        char *bus_str = NULL;
        char *cls_str = NULL;
        int str_size;

        if (iter == NULL) {
                RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL iterator\n");
                return -EINVAL;
        }

        if (devargs_str == NULL) {
                RTE_ETHDEV_LOG(ERR,
                        "Cannot initialize iterator from NULL device description string\n");
                return -EINVAL;
        }

        memset(iter, 0, sizeof(*iter));
        memset(&devargs, 0, sizeof(devargs));

        /*
         * The devargs string may use various syntaxes:
         *   - 0000:08:00.0,representor=[1-3]
         *   - pci:0000:06:00.0,representor=[0,5]
         *   - class=eth,mac=00:11:22:33:44:55
         *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
         */

        /*
         * Handle the pure class filter (i.e. without any bus-level argument)
         * from the future new syntax.
         * rte_devargs_parse() does not yet support the new syntax,
         * which is why this simple case is temporarily parsed here.
         */
#define iter_anybus_str "class=eth,"
        if (strncmp(devargs_str, iter_anybus_str,
                        strlen(iter_anybus_str)) == 0) {
                iter->cls_str = devargs_str + strlen(iter_anybus_str);
                goto end;
        }

        /* Split bus, device and parameters. */
        ret = rte_devargs_parse(&devargs, devargs_str);
        if (ret != 0)
                goto error;

        /*
         * Assume parameters of old syntax can match only at ethdev level.
         * Extra parameters will be ignored, thanks to the "+" prefix.
         */
        str_size = strlen(devargs.args) + 2;
        cls_str = malloc(str_size);
        if (cls_str == NULL) {
                ret = -ENOMEM;
                goto error;
        }
        ret = snprintf(cls_str, str_size, "+%s", devargs.args);
        if (ret != str_size - 1) {
                ret = -EINVAL;
                goto error;
        }
        iter->cls_str = cls_str;

        iter->bus = devargs.bus;
        if (iter->bus->dev_iterate == NULL) {
                ret = -ENOTSUP;
                goto error;
        }

        /* Convert bus args to new syntax for use with new API dev_iterate. */
        if ((strcmp(iter->bus->name, "vdev") == 0) ||
                (strcmp(iter->bus->name, "fslmc") == 0) ||
                (strcmp(iter->bus->name, "dpaa_bus") == 0)) {
                bus_param_key = "name";
        } else if (strcmp(iter->bus->name, "pci") == 0) {
                bus_param_key = "addr";
        } else {
                ret = -ENOTSUP;
                goto error;
        }
        str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
        bus_str = malloc(str_size);
        if (bus_str == NULL) {
                ret = -ENOMEM;
                goto error;
        }
        ret = snprintf(bus_str, str_size, "%s=%s",
                        bus_param_key, devargs.name);
        if (ret != str_size - 1) {
                ret = -EINVAL;
                goto error;
        }
        iter->bus_str = bus_str;

end:
        iter->cls = rte_class_find_by_name("eth");
        rte_devargs_reset(&devargs);
        return 0;

error:
        if (ret == -ENOTSUP)
                RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n",
                                iter->bus->name);
        rte_devargs_reset(&devargs);
        free(bus_str);
        free(cls_str);
        return ret;
}

uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
        if (iter == NULL) {
                RTE_ETHDEV_LOG(ERR,
                        "Cannot get next device from NULL iterator\n");
                return RTE_MAX_ETHPORTS;
        }

        if (iter->cls == NULL) /* invalid ethdev iterator */
                return RTE_MAX_ETHPORTS;

        do { /* loop to try all matching rte_device */
                /* If not pure ethdev filter and */
                if (iter->bus != NULL &&
                                /* not in middle of rte_eth_dev iteration, */
                                iter->class_device == NULL) {
                        /* get next rte_device to try. */
                        iter->device = iter->bus->dev_iterate(
                                        iter->device, iter->bus_str, iter);
                        if (iter->device == NULL)
                                break; /* no more rte_device candidate */
                }
                /* A device matched the bus part; now check the ethdev part. */
                iter->class_device = iter->cls->dev_iterate(
                                iter->class_device, iter->cls_str, iter);
                if (iter->class_device != NULL)
                        return eth_dev_to_id(iter->class_device); /* match */
        } while (iter->bus != NULL); /* need to try next rte_device */

        /* No more ethdev port to iterate. */
        rte_eth_iterator_cleanup(iter);
        return RTE_MAX_ETHPORTS;
}

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
        if (iter == NULL) {
                RTE_ETHDEV_LOG(ERR, "Cannot do clean up from NULL iterator\n");
                return;
        }

        if (iter->bus_str == NULL)
                return; /* nothing to free in pure class filter */
        free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
        free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
        memset(iter, 0, sizeof(*iter));
}
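
/*
 * Usage sketch (illustrative only): the three iterator calls above are
 * typically combined as below. rte_eth_iterator_next() performs the
 * cleanup itself once iteration is exhausted, so an explicit
 * rte_eth_iterator_cleanup() call is only needed when breaking out early.
 *
 *	struct rte_dev_iterator iter;
 *	uint16_t port_id;
 *
 *	if (rte_eth_iterator_init(&iter, "class=eth,mac=00:11:22:33:44:55") == 0) {
 *		for (port_id = rte_eth_iterator_next(&iter);
 *		     port_id != RTE_MAX_ETHPORTS;
 *		     port_id = rte_eth_iterator_next(&iter))
 *			printf("matched port %u\n", port_id);
 *	}
 */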

uint16_t
rte_eth_find_next(uint16_t port_id)
{
        while (port_id < RTE_MAX_ETHPORTS &&
                        rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
                port_id++;

        if (port_id >= RTE_MAX_ETHPORTS)
                return RTE_MAX_ETHPORTS;

        return port_id;
}

/*
 * Macro to iterate over all valid ports for internal usage.
 * Note: RTE_ETH_FOREACH_DEV differs because it filters out owned ports.
 */
#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
        for (port_id = rte_eth_find_next(0); \
             port_id < RTE_MAX_ETHPORTS; \
             port_id = rte_eth_find_next(port_id + 1))

uint16_t
rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
{
        port_id = rte_eth_find_next(port_id);
        while (port_id < RTE_MAX_ETHPORTS &&
                        rte_eth_devices[port_id].device != parent)
                port_id = rte_eth_find_next(port_id + 1);

        return port_id;
}

uint16_t
rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
        return rte_eth_find_next_of(port_id,
                        rte_eth_devices[ref_port_id].device);
}

static void
eth_dev_shared_data_prepare(void)
{
        const unsigned flags = 0;
        const struct rte_memzone *mz;

        rte_spinlock_lock(&eth_dev_shared_data_lock);

        if (eth_dev_shared_data == NULL) {
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        /* Allocate port data and ownership shared memory. */
                        mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
                                        sizeof(*eth_dev_shared_data),
                                        rte_socket_id(), flags);
                } else
                        mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
                if (mz == NULL)
                        rte_panic("Cannot allocate ethdev shared data\n");

                eth_dev_shared_data = mz->addr;
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        eth_dev_shared_data->next_owner_id =
                                        RTE_ETH_DEV_NO_OWNER + 1;
                        rte_spinlock_init(&eth_dev_shared_data->ownership_lock);
                        memset(eth_dev_shared_data->data, 0,
                               sizeof(eth_dev_shared_data->data));
                }
        }

        rte_spinlock_unlock(&eth_dev_shared_data_lock);
}

static bool
eth_dev_is_allocated(const struct rte_eth_dev *ethdev)
{
        return ethdev->data->name[0] != '\0';
}

static struct rte_eth_dev *
eth_dev_allocated(const char *name)
{
        uint16_t i;

        RTE_BUILD_BUG_ON(RTE_MAX_ETHPORTS >= UINT16_MAX);

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (rte_eth_devices[i].data != NULL &&
                    strcmp(rte_eth_devices[i].data->name, name) == 0)
                        return &rte_eth_devices[i];
        }
        return NULL;
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
        struct rte_eth_dev *ethdev;

        eth_dev_shared_data_prepare();

        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        ethdev = eth_dev_allocated(name);

        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

        return ethdev;
}

static uint16_t
eth_dev_find_free_port(void)
{
        uint16_t i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                /* Using shared name field to find a free port. */
                if (eth_dev_shared_data->data[i].name[0] == '\0') {
                        RTE_ASSERT(rte_eth_devices[i].state ==
                                   RTE_ETH_DEV_UNUSED);
                        return i;
                }
        }
        return RTE_MAX_ETHPORTS;
}

static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
{
        struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

        eth_dev->data = &eth_dev_shared_data->data[port_id];

        return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
        uint16_t port_id;
        struct rte_eth_dev *eth_dev = NULL;
        size_t name_len;

        name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
        if (name_len == 0) {
                RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n");
                return NULL;
        }

        if (name_len >= RTE_ETH_NAME_MAX_LEN) {
                RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n");
                return NULL;
        }

        eth_dev_shared_data_prepare();

        /* Synchronize port creation between primary and secondary processes. */
        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        if (eth_dev_allocated(name) != NULL) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethernet device with name %s already allocated\n",
                        name);
                goto unlock;
        }

        port_id = eth_dev_find_free_port();
        if (port_id == RTE_MAX_ETHPORTS) {
                RTE_ETHDEV_LOG(ERR,
                        "Reached maximum number of Ethernet ports\n");
                goto unlock;
        }

        eth_dev = eth_dev_get(port_id);
        strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
        eth_dev->data->port_id = port_id;
        eth_dev->data->mtu = RTE_ETHER_MTU;
        pthread_mutex_init(&eth_dev->data->flow_ops_mutex, NULL);

unlock:
        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

        return eth_dev;
}
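
/*
 * Driver-side sketch (illustrative, with hypothetical example_* names):
 * a PMD probe path typically pairs rte_eth_dev_allocate() with filling
 * in ops and burst functions before announcing the port; the
 * rte_eth_dev_probing_finish() step is assumed from ethdev_driver.h.
 *
 *	struct rte_eth_dev *eth_dev = rte_eth_dev_allocate("net_example0");
 *
 *	if (eth_dev == NULL)
 *		return -ENOMEM;
 *	eth_dev->dev_ops = &example_dev_ops;
 *	eth_dev->rx_pkt_burst = example_rx_burst;
 *	eth_dev->tx_pkt_burst = example_tx_burst;
 *	rte_eth_dev_probing_finish(eth_dev);
 */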

/*
 * Attach to a port already registered by the primary process, which
 * ensures that the same device has the same port ID in both the
 * primary and secondary processes.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
        uint16_t i;
        struct rte_eth_dev *eth_dev = NULL;

        eth_dev_shared_data_prepare();

        /* Synchronize port attachment with port creation and release
         * in the primary process. */
        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (strcmp(eth_dev_shared_data->data[i].name, name) == 0)
                        break;
        }
        if (i == RTE_MAX_ETHPORTS) {
                RTE_ETHDEV_LOG(ERR,
                        "Device %s is not driven by the primary process\n",
                        name);
        } else {
                eth_dev = eth_dev_get(i);
                RTE_ASSERT(eth_dev->data->port_id == i);
        }

        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
        return eth_dev;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
        if (eth_dev == NULL)
                return -EINVAL;

        eth_dev_shared_data_prepare();

        if (eth_dev->state != RTE_ETH_DEV_UNUSED)
                rte_eth_dev_callback_process(eth_dev,
                                RTE_ETH_EVENT_DESTROY, NULL);

        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        eth_dev->state = RTE_ETH_DEV_UNUSED;
        eth_dev->device = NULL;
        eth_dev->process_private = NULL;
        eth_dev->intr_handle = NULL;
        eth_dev->rx_pkt_burst = NULL;
        eth_dev->tx_pkt_burst = NULL;
        eth_dev->tx_pkt_prepare = NULL;
        eth_dev->rx_queue_count = NULL;
        eth_dev->rx_descriptor_done = NULL;
        eth_dev->rx_descriptor_status = NULL;
        eth_dev->tx_descriptor_status = NULL;
        eth_dev->dev_ops = NULL;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                rte_free(eth_dev->data->rx_queues);
                rte_free(eth_dev->data->tx_queues);
                rte_free(eth_dev->data->mac_addrs);
                rte_free(eth_dev->data->hash_mac_addrs);
                rte_free(eth_dev->data->dev_private);
                pthread_mutex_destroy(&eth_dev->data->flow_ops_mutex);
                memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
        }

        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

        return 0;
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
        if (port_id >= RTE_MAX_ETHPORTS ||
            (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
                return 0;
        else
                return 1;
}

static int
eth_is_valid_owner_id(uint64_t owner_id)
{
        if (owner_id == RTE_ETH_DEV_NO_OWNER ||
            eth_dev_shared_data->next_owner_id <= owner_id)
                return 0;
        return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
        port_id = rte_eth_find_next(port_id);
        while (port_id < RTE_MAX_ETHPORTS &&
                        rte_eth_devices[port_id].data->owner.id != owner_id)
                port_id = rte_eth_find_next(port_id + 1);

        return port_id;
}

int
rte_eth_dev_owner_new(uint64_t *owner_id)
{
        if (owner_id == NULL) {
                RTE_ETHDEV_LOG(ERR, "Cannot get new owner ID to NULL\n");
                return -EINVAL;
        }

        eth_dev_shared_data_prepare();

        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        *owner_id = eth_dev_shared_data->next_owner_id++;

        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
        return 0;
}

static int
eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
                       const struct rte_eth_dev_owner *new_owner)
{
        struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
        struct rte_eth_dev_owner *port_owner;

        if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
                RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
                        port_id);
                return -ENODEV;
        }

        if (new_owner == NULL) {
                RTE_ETHDEV_LOG(ERR,
                        "Cannot set ethdev port %u owner from NULL owner\n",
                        port_id);
                return -EINVAL;
        }

        if (!eth_is_valid_owner_id(new_owner->id) &&
            !eth_is_valid_owner_id(old_owner_id)) {
                RTE_ETHDEV_LOG(ERR,
                        "Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
                       old_owner_id, new_owner->id);
                return -EINVAL;
        }

        port_owner = &rte_eth_devices[port_id].data->owner;
        if (port_owner->id != old_owner_id) {
                RTE_ETHDEV_LOG(ERR,
                        "Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
                        port_id, port_owner->name, port_owner->id);
                return -EPERM;
        }

        /* cannot truncate (same size in both structures) */
        strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);

        port_owner->id = new_owner->id;

        RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
                port_id, new_owner->name, new_owner->id);

        return 0;
}

int
rte_eth_dev_owner_set(const uint16_t port_id,
                      const struct rte_eth_dev_owner *owner)
{
        int ret;

        eth_dev_shared_data_prepare();

        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
        return ret;
}

int
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
        const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
                        {.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
        int ret;

        eth_dev_shared_data_prepare();

        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        ret = eth_dev_owner_set(port_id, owner_id, &new_owner);

        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
        return ret;
}

int
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
        uint16_t port_id;
        int ret = 0;

        eth_dev_shared_data_prepare();

        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        if (eth_is_valid_owner_id(owner_id)) {
                for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
                        if (rte_eth_devices[port_id].data->owner.id == owner_id)
                                memset(&rte_eth_devices[port_id].data->owner, 0,
                                       sizeof(struct rte_eth_dev_owner));
                RTE_ETHDEV_LOG(NOTICE,
                        "All ports owned by %016"PRIx64" have had their owner removed\n",
                        owner_id);
        } else {
                RTE_ETHDEV_LOG(ERR,
                               "Invalid owner id=%016"PRIx64"\n",
                               owner_id);
                ret = -EINVAL;
        }

        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

        return ret;
}

int
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
        struct rte_eth_dev *ethdev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        ethdev = &rte_eth_devices[port_id];

        if (!eth_dev_is_allocated(ethdev)) {
                RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
                        port_id);
                return -ENODEV;
        }

        if (owner == NULL) {
                RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u owner to NULL\n",
                        port_id);
                return -EINVAL;
        }

        eth_dev_shared_data_prepare();

        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
        rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

        return 0;
}
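
/*
 * Usage sketch (illustrative only): a component claims a port so that
 * RTE_ETH_FOREACH_DEV in the rest of the application skips it (see the
 * RTE_ETH_FOREACH_VALID_DEV note above), then releases the whole group
 * on shutdown.
 *
 *	struct rte_eth_dev_owner owner = { .name = "my_component" };
 *
 *	if (rte_eth_dev_owner_new(&owner.id) == 0 &&
 *	    rte_eth_dev_owner_set(port_id, &owner) == 0) {
 *		... exclusive use of the port ...
 *		rte_eth_dev_owner_delete(owner.id);
 *	}
 */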

int
rte_eth_dev_socket_id(uint16_t port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
        return rte_eth_devices[port_id].data->numa_node;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
        return rte_eth_devices[port_id].security_ctx;
}

uint16_t
rte_eth_dev_count_avail(void)
{
        uint16_t p;
        uint16_t count;

        count = 0;

        RTE_ETH_FOREACH_DEV(p)
                count++;

        return count;
}

uint16_t
rte_eth_dev_count_total(void)
{
        uint16_t port, count = 0;

        RTE_ETH_FOREACH_VALID_DEV(port)
                count++;

        return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
        char *tmp;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        if (name == NULL) {
                RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u name to NULL\n",
                        port_id);
                return -EINVAL;
        }

        /* Do not check 'rte_eth_devices[i].data' here,
         * because it might be overwritten by a VDEV PMD. */
        tmp = eth_dev_shared_data->data[port_id].name;
        strcpy(name, tmp);
        return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
        uint16_t pid;

        if (name == NULL) {
                RTE_ETHDEV_LOG(ERR, "Cannot get port ID from NULL name\n");
                return -EINVAL;
        }

        if (port_id == NULL) {
                RTE_ETHDEV_LOG(ERR,
                        "Cannot get port ID to NULL for %s\n", name);
                return -EINVAL;
        }

        RTE_ETH_FOREACH_VALID_DEV(pid)
                if (!strcmp(name, eth_dev_shared_data->data[pid].name)) {
                        *port_id = pid;
                        return 0;
                }

        return -ENODEV;
}
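
/*
 * Illustrative round trip between the two lookups above (error handling
 * elided): for any valid allocated port, the name returned by
 * rte_eth_dev_get_name_by_port() maps back to the same port ID.
 *
 *	char name[RTE_ETH_NAME_MAX_LEN];
 *	uint16_t found_id;
 *
 *	rte_eth_dev_get_name_by_port(port_id, name);
 *	rte_eth_dev_get_port_by_name(name, &found_id);
 *	RTE_ASSERT(found_id == port_id);
 */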

static int
eth_err(uint16_t port_id, int ret)
{
        if (ret == 0)
                return 0;
        if (rte_eth_dev_is_removed(port_id))
                return -EIO;
        return ret;
}

static int
eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_rx_queues;
        void **rxq;
        unsigned i;

        if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
                                sizeof(dev->data->rx_queues[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (dev->data->rx_queues == NULL) {
                        dev->data->nb_rx_queues = 0;
                        return -(ENOMEM);
                }
        } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);
                rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (rxq == NULL)
                        return -(ENOMEM);
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(rxq + old_nb_queues, 0,
                                sizeof(rxq[0]) * new_qs);
                }

                dev->data->rx_queues = rxq;

        } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);

                rte_free(dev->data->rx_queues);
                dev->data->rx_queues = NULL;
        }
        dev->data->nb_rx_queues = nb_queues;
        return 0;
}

static int
eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        uint16_t port_id;

        if (rx_queue_id >= dev->data->nb_rx_queues) {
                port_id = dev->data->port_id;
                RTE_ETHDEV_LOG(ERR,
                               "Invalid Rx queue_id=%u of device with port_id=%u\n",
                               rx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->rx_queues[rx_queue_id] == NULL) {
                port_id = dev->data->port_id;
                RTE_ETHDEV_LOG(ERR,
                               "Queue %u of device with port_id=%u has not been setup\n",
                               rx_queue_id, port_id);
                return -EINVAL;
        }

        return 0;
}

static int
eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
        uint16_t port_id;

        if (tx_queue_id >= dev->data->nb_tx_queues) {
                port_id = dev->data->port_id;
                RTE_ETHDEV_LOG(ERR,
                               "Invalid Tx queue_id=%u of device with port_id=%u\n",
                               tx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->tx_queues[tx_queue_id] == NULL) {
                port_id = dev->data->port_id;
                RTE_ETHDEV_LOG(ERR,
                               "Queue %u of device with port_id=%u has not been setup\n",
                               tx_queue_id, port_id);
                return -EINVAL;
        }

        return 0;
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        if (!dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be started before starting any queue\n",
                        port_id);
                return -EINVAL;
        }

        ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
        if (ret != 0)
                return ret;

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

        if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        rx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->rx_queue_start(dev, rx_queue_id));
}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
        if (ret != 0)
                return ret;

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

        if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        rx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
}

int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        if (!dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be started before starting any queue\n",
                        port_id);
                return -EINVAL;
        }

        ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
        if (ret != 0)
                return ret;

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

        if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        tx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
        if (ret != 0)
                return ret;

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

        if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        tx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
}

static int
eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_tx_queues;
        void **txq;
        unsigned i;

        if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
                                                   sizeof(dev->data->tx_queues[0]) * nb_queues,
                                                   RTE_CACHE_LINE_SIZE);
                if (dev->data->tx_queues == NULL) {
                        dev->data->nb_tx_queues = 0;
                        return -(ENOMEM);
                }
        } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);
                txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
                                  RTE_CACHE_LINE_SIZE);
                if (txq == NULL)
                        return -ENOMEM;
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(txq + old_nb_queues, 0,
                               sizeof(txq[0]) * new_qs);
                }

                dev->data->tx_queues = txq;

        } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);

                rte_free(dev->data->tx_queues);
                dev->data->tx_queues = NULL;
        }
        dev->data->nb_tx_queues = nb_queues;
        return 0;
}

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
        switch (speed) {
        case ETH_SPEED_NUM_10M:
                return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
        case ETH_SPEED_NUM_100M:
                return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
        case ETH_SPEED_NUM_1G:
                return ETH_LINK_SPEED_1G;
        case ETH_SPEED_NUM_2_5G:
                return ETH_LINK_SPEED_2_5G;
        case ETH_SPEED_NUM_5G:
                return ETH_LINK_SPEED_5G;
        case ETH_SPEED_NUM_10G:
                return ETH_LINK_SPEED_10G;
        case ETH_SPEED_NUM_20G:
                return ETH_LINK_SPEED_20G;
        case ETH_SPEED_NUM_25G:
                return ETH_LINK_SPEED_25G;
        case ETH_SPEED_NUM_40G:
                return ETH_LINK_SPEED_40G;
        case ETH_SPEED_NUM_50G:
                return ETH_LINK_SPEED_50G;
        case ETH_SPEED_NUM_56G:
                return ETH_LINK_SPEED_56G;
        case ETH_SPEED_NUM_100G:
                return ETH_LINK_SPEED_100G;
        case ETH_SPEED_NUM_200G:
                return ETH_LINK_SPEED_200G;
        default:
                return 0;
        }
}
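
/*
 * Examples of the mapping above (illustrative): the duplex argument only
 * matters at 10M/100M, the speeds for which half-duplex variants exist.
 *
 *	rte_eth_speed_bitflag(ETH_SPEED_NUM_100M, ETH_LINK_FULL_DUPLEX)
 *		returns ETH_LINK_SPEED_100M
 *	rte_eth_speed_bitflag(ETH_SPEED_NUM_100M, ETH_LINK_HALF_DUPLEX)
 *		returns ETH_LINK_SPEED_100M_HD
 *	rte_eth_speed_bitflag(ETH_SPEED_NUM_25G, ETH_LINK_FULL_DUPLEX)
 *		returns ETH_LINK_SPEED_25G
 */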

const char *
rte_eth_dev_rx_offload_name(uint64_t offload)
{
        const char *name = "UNKNOWN";
        unsigned int i;

        for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i) {
                if (offload == eth_dev_rx_offload_names[i].offload) {
                        name = eth_dev_rx_offload_names[i].name;
                        break;
                }
        }

        return name;
}

const char *
rte_eth_dev_tx_offload_name(uint64_t offload)
{
        const char *name = "UNKNOWN";
        unsigned int i;

        for (i = 0; i < RTE_DIM(eth_dev_tx_offload_names); ++i) {
                if (offload == eth_dev_tx_offload_names[i].offload) {
                        name = eth_dev_tx_offload_names[i].name;
                        break;
                }
        }

        return name;
}

static inline int
eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
                   uint32_t max_rx_pkt_len, uint32_t dev_info_size)
{
        int ret = 0;

        if (dev_info_size == 0) {
                if (config_size != max_rx_pkt_len) {
                        RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
                                       " %u != %u is not allowed\n",
                                       port_id, config_size, max_rx_pkt_len);
                        ret = -EINVAL;
                }
        } else if (config_size > dev_info_size) {
                RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
                               "> max allowed value %u\n", port_id, config_size,
                               dev_info_size);
                ret = -EINVAL;
        } else if (config_size < RTE_ETHER_MIN_LEN) {
                RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
                               "< min allowed value %u\n", port_id, config_size,
                               (unsigned int)RTE_ETHER_MIN_LEN);
                ret = -EINVAL;
        }
        return ret;
}

/*
 * Validate offloads that are requested through rte_eth_dev_configure against
 * the offloads successfully set by the Ethernet device.
 *
 * @param port_id
 *   The port identifier of the Ethernet device.
 * @param req_offloads
 *   The offloads that have been requested through `rte_eth_dev_configure`.
 * @param set_offloads
 *   The offloads successfully set by the Ethernet device.
 * @param offload_type
 *   The offload type i.e. Rx/Tx string.
 * @param offload_name
 *   The function that prints the offload name.
 * @return
 *   - (0) if validation successful.
 *   - (-EINVAL) if requested offload has been silently disabled.
 */
static int
eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads,
                  uint64_t set_offloads, const char *offload_type,
                  const char *(*offload_name)(uint64_t))
{
        uint64_t offloads_diff = req_offloads ^ set_offloads;
        uint64_t offload;
        int ret = 0;

        while (offloads_diff != 0) {
                /* Check if any offload is requested but not enabled. */
                offload = 1ULL << __builtin_ctzll(offloads_diff);
                if (offload & req_offloads) {
                        RTE_ETHDEV_LOG(ERR,
                                "Port %u failed to enable %s offload %s\n",
                                port_id, offload_type, offload_name(offload));
                        ret = -EINVAL;
                }

                /* Check if offload couldn't be disabled. */
                if (offload & set_offloads) {
                        RTE_ETHDEV_LOG(DEBUG,
                                "Port %u %s offload %s is not requested but enabled\n",
                                port_id, offload_type, offload_name(offload));
                }

                offloads_diff &= ~offload;
        }

        return ret;
}
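
/*
 * Worked example (illustrative): req_offloads = 0x6 and set_offloads = 0x5
 * give offloads_diff = 0x3. Bit 0 is set-only, so it is reported at DEBUG
 * as "not requested but enabled"; bit 1 is requested-only, so it is
 * reported at ERR as "failed to enable" and the function returns -EINVAL.
 * Bit 2 is present in both masks, never appears in the diff, and is not
 * visited.
 */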
1326
1327 int
1328 rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
1329                       const struct rte_eth_conf *dev_conf)
1330 {
1331         struct rte_eth_dev *dev;
1332         struct rte_eth_dev_info dev_info;
1333         struct rte_eth_conf orig_conf;
1334         uint16_t overhead_len;
1335         int diag;
1336         int ret;
1337         uint16_t old_mtu;
1338
1339         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1340         dev = &rte_eth_devices[port_id];
1341
1342         if (dev_conf == NULL) {
1343                 RTE_ETHDEV_LOG(ERR,
1344                         "Cannot configure ethdev port %u from NULL config\n",
1345                         port_id);
1346                 return -EINVAL;
1347         }
1348
1349         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
1350
1351         if (dev->data->dev_started) {
1352                 RTE_ETHDEV_LOG(ERR,
1353                         "Port %u must be stopped to allow configuration\n",
1354                         port_id);
1355                 return -EBUSY;
1356         }
1357
1358         /*
1359          * Ensure that "dev_configured" is always 0 each time prepare to do
1360          * dev_configure() to avoid any non-anticipated behaviour.
1361          * And set to 1 when dev_configure() is executed successfully.
1362          */
1363         dev->data->dev_configured = 0;
1364
1365          /* Store original config, as rollback required on failure */
1366         memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));
1367
1368         /*
1369          * Copy the dev_conf parameter into the dev structure.
1370          * rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get
1371          */
1372         if (dev_conf != &dev->data->dev_conf)
1373                 memcpy(&dev->data->dev_conf, dev_conf,
1374                        sizeof(dev->data->dev_conf));
1375
1376         /* Backup mtu for rollback */
1377         old_mtu = dev->data->mtu;
1378
1379         ret = rte_eth_dev_info_get(port_id, &dev_info);
1380         if (ret != 0)
1381                 goto rollback;
1382
1383         /* Get the real Ethernet overhead length */
1384         if (dev_info.max_mtu != UINT16_MAX &&
1385             dev_info.max_rx_pktlen > dev_info.max_mtu)
1386                 overhead_len = dev_info.max_rx_pktlen - dev_info.max_mtu;
1387         else
1388                 overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
1389
1390         /* If number of queues specified by application for both Rx and Tx is
1391          * zero, use driver preferred values. This cannot be done individually
1392          * as it is valid for either Tx or Rx (but not both) to be zero.
1393          * If driver does not provide any preferred valued, fall back on
1394          * EAL defaults.
1395          */
1396         if (nb_rx_q == 0 && nb_tx_q == 0) {
1397                 nb_rx_q = dev_info.default_rxportconf.nb_queues;
1398                 if (nb_rx_q == 0)
1399                         nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
1400                 nb_tx_q = dev_info.default_txportconf.nb_queues;
1401                 if (nb_tx_q == 0)
1402                         nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
1403         }
1404
1405         if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
1406                 RTE_ETHDEV_LOG(ERR,
1407                         "Number of RX queues requested (%u) is greater than max supported(%d)\n",
1408                         nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
1409                 ret = -EINVAL;
1410                 goto rollback;
1411         }
1412
1413         if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
1414                 RTE_ETHDEV_LOG(ERR,
1415                         "Number of TX queues requested (%u) is greater than max supported(%d)\n",
1416                         nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
1417                 ret = -EINVAL;
1418                 goto rollback;
1419         }
1420
1421         /*
1422          * Check that the numbers of RX and TX queues are not greater
1423          * than the maximum number of RX and TX queues supported by the
1424          * configured device.
1425          */
1426         if (nb_rx_q > dev_info.max_rx_queues) {
1427                 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
1428                         port_id, nb_rx_q, dev_info.max_rx_queues);
1429                 ret = -EINVAL;
1430                 goto rollback;
1431         }
1432
1433         if (nb_tx_q > dev_info.max_tx_queues) {
1434                 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
1435                         port_id, nb_tx_q, dev_info.max_tx_queues);
1436                 ret = -EINVAL;
1437                 goto rollback;
1438         }
1439
1440         /* Check that the device supports requested interrupts */
1441         if ((dev_conf->intr_conf.lsc == 1) &&
1442                         (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
1443                 RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
1444                         dev->device->driver->name);
1445                 ret = -EINVAL;
1446                 goto rollback;
1447         }
1448         if ((dev_conf->intr_conf.rmv == 1) &&
1449                         (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
1450                 RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
1451                         dev->device->driver->name);
1452                 ret = -EINVAL;
1453                 goto rollback;
1454         }
1455
1456         /*
1457          * If jumbo frames are enabled, check that the maximum RX packet
1458          * length is supported by the configured device.
1459          */
1460         if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
1461                 if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) {
1462                         RTE_ETHDEV_LOG(ERR,
1463                                 "Ethdev port_id=%u max_rx_pkt_len %u > max valid value %u\n",
1464                                 port_id, dev_conf->rxmode.max_rx_pkt_len,
1465                                 dev_info.max_rx_pktlen);
1466                         ret = -EINVAL;
1467                         goto rollback;
1468                 } else if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN) {
1469                         RTE_ETHDEV_LOG(ERR,
1470                                 "Ethdev port_id=%u max_rx_pkt_len %u < min valid value %u\n",
1471                                 port_id, dev_conf->rxmode.max_rx_pkt_len,
1472                                 (unsigned int)RTE_ETHER_MIN_LEN);
1473                         ret = -EINVAL;
1474                         goto rollback;
1475                 }
1476
1477                 /* Scale the MTU size to adapt max_rx_pkt_len */
1478                 dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
1479                                 overhead_len;
1480         } else {
1481                 uint16_t pktlen = dev_conf->rxmode.max_rx_pkt_len;
1482                 if (pktlen < RTE_ETHER_MIN_MTU + overhead_len ||
1483                     pktlen > RTE_ETHER_MTU + overhead_len)
1484                         /* Use default value */
1485                         dev->data->dev_conf.rxmode.max_rx_pkt_len =
1486                                                 RTE_ETHER_MTU + overhead_len;
1487         }
1488
1489         /*
1490          * If LRO is enabled, check that the maximum aggregated packet
1491          * size is supported by the configured device.
1492          */
1493         if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
1494                 if (dev_conf->rxmode.max_lro_pkt_size == 0)
1495                         dev->data->dev_conf.rxmode.max_lro_pkt_size =
1496                                 dev->data->dev_conf.rxmode.max_rx_pkt_len;
1497                 ret = eth_dev_check_lro_pkt_size(port_id,
1498                                 dev->data->dev_conf.rxmode.max_lro_pkt_size,
1499                                 dev->data->dev_conf.rxmode.max_rx_pkt_len,
1500                                 dev_info.max_lro_pkt_size);
1501                 if (ret != 0)
1502                         goto rollback;
1503         }
1504
1505         /* Any requested offloading must be within its device capabilities */
1506         if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
1507              dev_conf->rxmode.offloads) {
1508                 RTE_ETHDEV_LOG(ERR,
1509                         "Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
1510                         "capabilities 0x%"PRIx64" in %s()\n",
1511                         port_id, dev_conf->rxmode.offloads,
1512                         dev_info.rx_offload_capa,
1513                         __func__);
1514                 ret = -EINVAL;
1515                 goto rollback;
1516         }
1517         if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
1518              dev_conf->txmode.offloads) {
1519                 RTE_ETHDEV_LOG(ERR,
1520                         "Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
1521                         "capabilities 0x%"PRIx64" in %s()\n",
1522                         port_id, dev_conf->txmode.offloads,
1523                         dev_info.tx_offload_capa,
1524                         __func__);
1525                 ret = -EINVAL;
1526                 goto rollback;
1527         }
1528
1529         dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
1530                 rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);
1531
1532         /* Check that the device supports the requested RSS hash functions. */
1533         if ((dev_info.flow_type_rss_offloads |
1534              dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
1535             dev_info.flow_type_rss_offloads) {
1536                 RTE_ETHDEV_LOG(ERR,
1537                         "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
1538                         port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
1539                         dev_info.flow_type_rss_offloads);
1540                 ret = -EINVAL;
1541                 goto rollback;
1542         }
1543
1544         /* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
1545         if (((dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) == 0) &&
1546             (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
1547                 RTE_ETHDEV_LOG(ERR,
1548                         "Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
1549                         port_id,
1550                         rte_eth_dev_rx_offload_name(DEV_RX_OFFLOAD_RSS_HASH));
1551                 ret = -EINVAL;
1552                 goto rollback;
1553         }
1554
1555         /*
1556          * Set up the new number of Rx/Tx queues and reconfigure the device.
1557          */
1558         diag = eth_dev_rx_queue_config(dev, nb_rx_q);
1559         if (diag != 0) {
1560                 RTE_ETHDEV_LOG(ERR,
1561                         "Port%u eth_dev_rx_queue_config = %d\n",
1562                         port_id, diag);
1563                 ret = diag;
1564                 goto rollback;
1565         }
1566
1567         diag = eth_dev_tx_queue_config(dev, nb_tx_q);
1568         if (diag != 0) {
1569                 RTE_ETHDEV_LOG(ERR,
1570                         "Port%u eth_dev_tx_queue_config = %d\n",
1571                         port_id, diag);
1572                 eth_dev_rx_queue_config(dev, 0);
1573                 ret = diag;
1574                 goto rollback;
1575         }
1576
1577         diag = (*dev->dev_ops->dev_configure)(dev);
1578         if (diag != 0) {
1579                 RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
1580                         port_id, diag);
1581                 ret = eth_err(port_id, diag);
1582                 goto reset_queues;
1583         }
1584
1585         /* Initialize Rx profiling if enabled at compilation time. */
1586         diag = __rte_eth_dev_profile_init(port_id, dev);
1587         if (diag != 0) {
1588                 RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
1589                         port_id, diag);
1590                 ret = eth_err(port_id, diag);
1591                 goto reset_queues;
1592         }
1593
1594         /* Validate Rx offloads. */
1595         diag = eth_dev_validate_offloads(port_id,
1596                         dev_conf->rxmode.offloads,
1597                         dev->data->dev_conf.rxmode.offloads, "Rx",
1598                         rte_eth_dev_rx_offload_name);
1599         if (diag != 0) {
1600                 ret = diag;
1601                 goto reset_queues;
1602         }
1603
1604         /* Validate Tx offloads. */
1605         diag = eth_dev_validate_offloads(port_id,
1606                         dev_conf->txmode.offloads,
1607                         dev->data->dev_conf.txmode.offloads, "Tx",
1608                         rte_eth_dev_tx_offload_name);
1609         if (diag != 0) {
1610                 ret = diag;
1611                 goto reset_queues;
1612         }
1613
1614         dev->data->dev_configured = 1;
1615         rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0);
1616         return 0;
1617 reset_queues:
1618         eth_dev_rx_queue_config(dev, 0);
1619         eth_dev_tx_queue_config(dev, 0);
1620 rollback:
1621         memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
1622         if (old_mtu != dev->data->mtu)
1623                 dev->data->mtu = old_mtu;
1624
1625         rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret);
1626         return ret;
1627 }
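/*
 * Caller-side sketch of the configure step validated above (illustrative
 * only; "port_id" is a placeholder and the RSS settings are an example
 * that a given PMD may or may not support):
 *
 *	struct rte_eth_conf port_conf = {
 *		.rxmode = {
 *			.mq_mode = ETH_MQ_RX_RSS,
 *			.offloads = DEV_RX_OFFLOAD_RSS_HASH,
 *		},
 *	};
 *	int ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
 *	if (ret != 0)
 *		printf("configure failed: %s\n", rte_strerror(-ret));
 *
 * A failure lands on one of the rollback paths above, which restore the
 * original device configuration and MTU.
 */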
1628
1629 void
1630 rte_eth_dev_internal_reset(struct rte_eth_dev *dev)
1631 {
1632         if (dev->data->dev_started) {
1633                 RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
1634                         dev->data->port_id);
1635                 return;
1636         }
1637
1638         eth_dev_rx_queue_config(dev, 0);
1639         eth_dev_tx_queue_config(dev, 0);
1640
1641         memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
1642 }
1643
1644 static void
1645 eth_dev_mac_restore(struct rte_eth_dev *dev,
1646                         struct rte_eth_dev_info *dev_info)
1647 {
1648         struct rte_ether_addr *addr;
1649         uint16_t i;
1650         uint32_t pool = 0;
1651         uint64_t pool_mask;
1652
1653         /* replay MAC address configuration including default MAC */
1654         addr = &dev->data->mac_addrs[0];
1655         if (*dev->dev_ops->mac_addr_set != NULL)
1656                 (*dev->dev_ops->mac_addr_set)(dev, addr);
1657         else if (*dev->dev_ops->mac_addr_add != NULL)
1658                 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);
1659
1660         if (*dev->dev_ops->mac_addr_add != NULL) {
1661                 for (i = 1; i < dev_info->max_mac_addrs; i++) {
1662                         addr = &dev->data->mac_addrs[i];
1663
1664                         /* skip zero address */
1665                         if (rte_is_zero_ether_addr(addr))
1666                                 continue;
1667
1668                         pool = 0;
1669                         pool_mask = dev->data->mac_pool_sel[i];
1670
1671                         do {
1672                                 if (pool_mask & 1ULL)
1673                                         (*dev->dev_ops->mac_addr_add)(dev,
1674                                                 addr, i, pool);
1675                                 pool_mask >>= 1;
1676                                 pool++;
1677                         } while (pool_mask);
1678                 }
1679         }
1680 }
1681
1682 static int
1683 eth_dev_config_restore(struct rte_eth_dev *dev,
1684                 struct rte_eth_dev_info *dev_info, uint16_t port_id)
1685 {
1686         int ret;
1687
1688         if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
1689                 eth_dev_mac_restore(dev, dev_info);
1690
1691         /* replay promiscuous configuration */
1692         /*
1693          * Use the driver callbacks directly: no port_id check is needed
1694          * and we want to bypass the public API's same-value short-circuit.
1695          */
1696         if (rte_eth_promiscuous_get(port_id) == 1 &&
1697             *dev->dev_ops->promiscuous_enable != NULL) {
1698                 ret = eth_err(port_id,
1699                               (*dev->dev_ops->promiscuous_enable)(dev));
1700                 if (ret != 0 && ret != -ENOTSUP) {
1701                         RTE_ETHDEV_LOG(ERR,
1702                                 "Failed to enable promiscuous mode for device (port %u): %s\n",
1703                                 port_id, rte_strerror(-ret));
1704                         return ret;
1705                 }
1706         } else if (rte_eth_promiscuous_get(port_id) == 0 &&
1707                    *dev->dev_ops->promiscuous_disable != NULL) {
1708                 ret = eth_err(port_id,
1709                               (*dev->dev_ops->promiscuous_disable)(dev));
1710                 if (ret != 0 && ret != -ENOTSUP) {
1711                         RTE_ETHDEV_LOG(ERR,
1712                                 "Failed to disable promiscuous mode for device (port %u): %s\n",
1713                                 port_id, rte_strerror(-ret));
1714                         return ret;
1715                 }
1716         }
1717
1718         /* replay all multicast configuration */
1719         /*
1720          * Use the driver callbacks directly: no port_id check is needed
1721          * and we want to bypass the public API's same-value short-circuit.
1722          */
1723         if (rte_eth_allmulticast_get(port_id) == 1 &&
1724             *dev->dev_ops->allmulticast_enable != NULL) {
1725                 ret = eth_err(port_id,
1726                               (*dev->dev_ops->allmulticast_enable)(dev));
1727                 if (ret != 0 && ret != -ENOTSUP) {
1728                         RTE_ETHDEV_LOG(ERR,
1729                                 "Failed to enable allmulticast mode for device (port %u): %s\n",
1730                                 port_id, rte_strerror(-ret));
1731                         return ret;
1732                 }
1733         } else if (rte_eth_allmulticast_get(port_id) == 0 &&
1734                    *dev->dev_ops->allmulticast_disable != NULL) {
1735                 ret = eth_err(port_id,
1736                               (*dev->dev_ops->allmulticast_disable)(dev));
1737                 if (ret != 0 && ret != -ENOTSUP) {
1738                         RTE_ETHDEV_LOG(ERR,
1739                                 "Failed to disable allmulticast mode for device (port %u): %s\n",
1740                                 port_id, rte_strerror(-ret));
1741                         return ret;
1742                 }
1743         }
1744
1745         return 0;
1746 }
1747
1748 int
1749 rte_eth_dev_start(uint16_t port_id)
1750 {
1751         struct rte_eth_dev *dev;
1752         struct rte_eth_dev_info dev_info;
1753         int diag;
1754         int ret, ret_stop;
1755
1756         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1757         dev = &rte_eth_devices[port_id];
1758
1759         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1760
1761         if (dev->data->dev_configured == 0) {
1762                 RTE_ETHDEV_LOG(INFO,
1763                         "Device with port_id=%"PRIu16" is not configured.\n",
1764                         port_id);
1765                 return -EINVAL;
1766         }
1767
1768         if (dev->data->dev_started != 0) {
1769                 RTE_ETHDEV_LOG(INFO,
1770                         "Device with port_id=%"PRIu16" already started\n",
1771                         port_id);
1772                 return 0;
1773         }
1774
1775         ret = rte_eth_dev_info_get(port_id, &dev_info);
1776         if (ret != 0)
1777                 return ret;
1778
1779         /* Restore the MAC address now if the device does not support live change */
1780         if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
1781                 eth_dev_mac_restore(dev, &dev_info);
1782
1783         diag = (*dev->dev_ops->dev_start)(dev);
1784         if (diag == 0)
1785                 dev->data->dev_started = 1;
1786         else
1787                 return eth_err(port_id, diag);
1788
1789         ret = eth_dev_config_restore(dev, &dev_info, port_id);
1790         if (ret != 0) {
1791                 RTE_ETHDEV_LOG(ERR,
1792                         "Error during restoring configuration for device (port %u): %s\n",
1793                         port_id, rte_strerror(-ret));
1794                 ret_stop = rte_eth_dev_stop(port_id);
1795                 if (ret_stop != 0) {
1796                         RTE_ETHDEV_LOG(ERR,
1797                                 "Failed to stop device (port %u): %s\n",
1798                                 port_id, rte_strerror(-ret_stop));
1799                 }
1800
1801                 return ret;
1802         }
1803
1804         if (dev->data->dev_conf.intr_conf.lsc == 0) {
1805                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
1806                 (*dev->dev_ops->link_update)(dev, 0);
1807         }
1808
1809         rte_ethdev_trace_start(port_id);
1810         return 0;
1811 }
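/*
 * Minimal caller-side start sequence (a sketch; "port_id" is a
 * placeholder for a configured port with its queues already set up):
 *
 *	int ret = rte_eth_dev_start(port_id);
 *	if (ret != 0)
 *		rte_exit(EXIT_FAILURE, "Cannot start port %u: %s\n",
 *			 port_id, rte_strerror(-ret));
 *
 * On success, the MAC/promiscuous/allmulticast state has been replayed by
 * eth_dev_config_restore() and, when LSC interrupts are disabled, the
 * initial link status has been polled.
 */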
1812
1813 int
1814 rte_eth_dev_stop(uint16_t port_id)
1815 {
1816         struct rte_eth_dev *dev;
1817         int ret;
1818
1819         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1820         dev = &rte_eth_devices[port_id];
1821
1822         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_stop, -ENOTSUP);
1823
1824         if (dev->data->dev_started == 0) {
1825                 RTE_ETHDEV_LOG(INFO,
1826                         "Device with port_id=%"PRIu16" already stopped\n",
1827                         port_id);
1828                 return 0;
1829         }
1830
1831         dev->data->dev_started = 0;
1832         ret = (*dev->dev_ops->dev_stop)(dev);
1833         rte_ethdev_trace_stop(port_id, ret);
1834
1835         return ret;
1836 }
1837
1838 int
1839 rte_eth_dev_set_link_up(uint16_t port_id)
1840 {
1841         struct rte_eth_dev *dev;
1842
1843         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1844         dev = &rte_eth_devices[port_id];
1845
1846         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1847         return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
1848 }
1849
1850 int
1851 rte_eth_dev_set_link_down(uint16_t port_id)
1852 {
1853         struct rte_eth_dev *dev;
1854
1855         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1856         dev = &rte_eth_devices[port_id];
1857
1858         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1859         return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
1860 }
1861
1862 int
1863 rte_eth_dev_close(uint16_t port_id)
1864 {
1865         struct rte_eth_dev *dev;
1866         int firsterr, binerr;
1867         int *lasterr = &firsterr;
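        /*
         * "lasterr" initially points at "firsterr" so the first failing
         * call below is the one reported; once an error is recorded, it is
         * redirected to the scratch "binerr" so a later error cannot
         * overwrite the first one.
         */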
1868
1869         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1870         dev = &rte_eth_devices[port_id];
1871
1872         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
1873         *lasterr = (*dev->dev_ops->dev_close)(dev);
1874         if (*lasterr != 0)
1875                 lasterr = &binerr;
1876
1877         rte_ethdev_trace_close(port_id);
1878         *lasterr = rte_eth_dev_release_port(dev);
1879
1880         return firsterr;
1881 }
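/*
 * Caller-side teardown sketch (illustrative; "port_id" is a placeholder):
 * stop the port before closing it; close also releases the port via
 * rte_eth_dev_release_port().
 *
 *	ret = rte_eth_dev_stop(port_id);
 *	if (ret != 0)
 *		printf("Failed to stop port %u: %s\n",
 *		       port_id, rte_strerror(-ret));
 *	ret = rte_eth_dev_close(port_id);
 */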
1882
1883 int
1884 rte_eth_dev_reset(uint16_t port_id)
1885 {
1886         struct rte_eth_dev *dev;
1887         int ret;
1888
1889         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1890         dev = &rte_eth_devices[port_id];
1891
1892         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
1893
1894         ret = rte_eth_dev_stop(port_id);
1895         if (ret != 0) {
1896                 RTE_ETHDEV_LOG(ERR,
1897                         "Failed to stop device (port %u) before reset: %s - ignore\n",
1898                         port_id, rte_strerror(-ret));
1899         }
1900         ret = dev->dev_ops->dev_reset(dev);
1901
1902         return eth_err(port_id, ret);
1903 }
1904
1905 int
1906 rte_eth_dev_is_removed(uint16_t port_id)
1907 {
1908         struct rte_eth_dev *dev;
1909         int ret;
1910
1911         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
1912         dev = &rte_eth_devices[port_id];
1913
1914         if (dev->state == RTE_ETH_DEV_REMOVED)
1915                 return 1;
1916
1917         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);
1918
1919         ret = dev->dev_ops->is_removed(dev);
1920         if (ret != 0)
1921                 /* Device is physically removed. */
1922                 dev->state = RTE_ETH_DEV_REMOVED;
1923
1924         return ret;
1925 }
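/*
 * Illustrative hot-unplug check (a sketch; "pkts", "BURST" and
 * "handle_removal" are placeholders): poll rte_eth_dev_is_removed() when
 * an Rx burst unexpectedly returns nothing, to tell an idle link apart
 * from a physically removed device.
 *
 *	if (rte_eth_rx_burst(port_id, 0, pkts, BURST) == 0 &&
 *	    rte_eth_dev_is_removed(port_id))
 *		handle_removal(port_id);
 */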
1926
1927 static int
1928 rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg,
1929                              uint16_t n_seg, uint32_t *mbp_buf_size,
1930                              const struct rte_eth_dev_info *dev_info)
1931 {
1932         const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa;
1933         struct rte_mempool *mp_first;
1934         uint32_t offset_mask;
1935         uint16_t seg_idx;
1936
1937         if (n_seg > seg_capa->max_nseg) {
1938                 RTE_ETHDEV_LOG(ERR,
1939                                "Requested Rx segments %u exceed supported %u\n",
1940                                n_seg, seg_capa->max_nseg);
1941                 return -EINVAL;
1942         }
1943         /*
1944          * Check the sizes and offsets against buffer sizes
1945          * for each segment specified in extended configuration.
1946          */
1947         mp_first = rx_seg[0].mp;
1948         offset_mask = (1u << seg_capa->offset_align_log2) - 1;
1949         for (seg_idx = 0; seg_idx < n_seg; seg_idx++) {
1950                 struct rte_mempool *mpl = rx_seg[seg_idx].mp;
1951                 uint32_t length = rx_seg[seg_idx].length;
1952                 uint32_t offset = rx_seg[seg_idx].offset;
1953
1954                 if (mpl == NULL) {
1955                         RTE_ETHDEV_LOG(ERR, "null mempool pointer\n");
1956                         return -EINVAL;
1957                 }
1958                 if (seg_idx != 0 && mp_first != mpl &&
1959                     seg_capa->multi_pools == 0) {
1960                         RTE_ETHDEV_LOG(ERR, "Receiving to multiple pools is not supported\n");
1961                         return -ENOTSUP;
1962                 }
1963                 if (offset != 0) {
1964                         if (seg_capa->offset_allowed == 0) {
1965                                 RTE_ETHDEV_LOG(ERR, "Rx segmentation with offset is not supported\n");
1966                                 return -ENOTSUP;
1967                         }
1968                         if (offset & offset_mask) {
1969                                 RTE_ETHDEV_LOG(ERR, "Rx segmentation invalid offset alignment %u, %u\n",
1970                                                offset,
1971                                                seg_capa->offset_align_log2);
1972                                 return -EINVAL;
1973                         }
1974                 }
1975                 if (mpl->private_data_size <
1976                         sizeof(struct rte_pktmbuf_pool_private)) {
1977                         RTE_ETHDEV_LOG(ERR,
1978                                        "%s private_data_size %u < %u\n",
1979                                        mpl->name, mpl->private_data_size,
1980                                        (unsigned int)sizeof
1981                                         (struct rte_pktmbuf_pool_private));
1982                         return -ENOSPC;
1983                 }
1984                 offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM;
1985                 *mbp_buf_size = rte_pktmbuf_data_room_size(mpl);
1986                 length = length != 0 ? length : *mbp_buf_size;
1987                 if (*mbp_buf_size < length + offset) {
1988                         RTE_ETHDEV_LOG(ERR,
1989                                        "%s mbuf_data_room_size %u < %u (segment length=%u + segment offset=%u)\n",
1990                                        mpl->name, *mbp_buf_size,
1991                                        length + offset, length, offset);
1992                         return -EINVAL;
1993                 }
1994         }
1995         return 0;
1996 }
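/*
 * Shape of the extended configuration validated above (a sketch;
 * "hdr_pool", "pay_pool" and "nb_desc" are placeholders): split each
 * packet into a 128-byte header segment and a payload segment taking the
 * rest, where a zero length means "whole buffer" as handled above.
 *
 *	struct rte_eth_rxseg_split segs[2] = {
 *		{ .mp = hdr_pool, .length = 128, .offset = 0 },
 *		{ .mp = pay_pool, .length = 0, .offset = 0 },
 *	};
 *	struct rte_eth_rxconf rxconf = {
 *		.offloads = RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT,
 *		.rx_nseg = 2,
 *		.rx_seg = (union rte_eth_rxseg *)segs,
 *	};
 *	ret = rte_eth_rx_queue_setup(port_id, 0, nb_desc,
 *				     rte_eth_dev_socket_id(port_id),
 *				     &rxconf, NULL);
 */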
1997
1998 int
1999 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2000                        uint16_t nb_rx_desc, unsigned int socket_id,
2001                        const struct rte_eth_rxconf *rx_conf,
2002                        struct rte_mempool *mp)
2003 {
2004         int ret;
2005         uint32_t mbp_buf_size;
2006         struct rte_eth_dev *dev;
2007         struct rte_eth_dev_info dev_info;
2008         struct rte_eth_rxconf local_conf;
2009         void **rxq;
2010
2011         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2012         dev = &rte_eth_devices[port_id];
2013
2014         if (rx_queue_id >= dev->data->nb_rx_queues) {
2015                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
2016                 return -EINVAL;
2017         }
2018
2019         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
2020
2021         ret = rte_eth_dev_info_get(port_id, &dev_info);
2022         if (ret != 0)
2023                 return ret;
2024
2025         if (mp != NULL) {
2026                 /* Single pool configuration check. */
2027                 if (rx_conf != NULL && rx_conf->rx_nseg != 0) {
2028                         RTE_ETHDEV_LOG(ERR,
2029                                        "Ambiguous segment configuration\n");
2030                         return -EINVAL;
2031                 }
2032                 /*
2033                  * Check the size of the mbuf data buffer: this value
2034                  * must be provided in the private data of the memory pool.
2035                  * First check that the memory pool has valid private data.
2036                  */
2037                 if (mp->private_data_size <
2038                                 sizeof(struct rte_pktmbuf_pool_private)) {
2039                         RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n",
2040                                 mp->name, mp->private_data_size,
2041                                 (unsigned int)
2042                                 sizeof(struct rte_pktmbuf_pool_private));
2043                         return -ENOSPC;
2044                 }
2045                 mbp_buf_size = rte_pktmbuf_data_room_size(mp);
2046                 if (mbp_buf_size < dev_info.min_rx_bufsize +
2047                                    RTE_PKTMBUF_HEADROOM) {
2048                         RTE_ETHDEV_LOG(ERR,
2049                                        "%s mbuf_data_room_size %u < %u (RTE_PKTMBUF_HEADROOM=%u + min_rx_bufsize(dev)=%u)\n",
2050                                        mp->name, mbp_buf_size,
2051                                        RTE_PKTMBUF_HEADROOM +
2052                                        dev_info.min_rx_bufsize,
2053                                        RTE_PKTMBUF_HEADROOM,
2054                                        dev_info.min_rx_bufsize);
2055                         return -EINVAL;
2056                 }
2057         } else {
2058                 const struct rte_eth_rxseg_split *rx_seg;
2059                 uint16_t n_seg;
2060
2061                 /* Extended multi-segment configuration check. */
2062                 if (rx_conf == NULL || rx_conf->rx_seg == NULL || rx_conf->rx_nseg == 0) {
2063                         RTE_ETHDEV_LOG(ERR,
2064                                        "Memory pool is null and no extended configuration provided\n");
2065                         return -EINVAL;
2066                 }
2067
2068                 rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg;
2069                 n_seg = rx_conf->rx_nseg;
2070
2071                 if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
2072                         ret = rte_eth_rx_queue_check_split(rx_seg, n_seg,
2073                                                            &mbp_buf_size,
2074                                                            &dev_info);
2075                         if (ret != 0)
2076                                 return ret;
2077                 } else {
2078                         RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n");
2079                         return -EINVAL;
2080                 }
2081         }
2082
2083         /* Use default specified by driver, if nb_rx_desc is zero */
2084         if (nb_rx_desc == 0) {
2085                 nb_rx_desc = dev_info.default_rxportconf.ring_size;
2086                 /* If driver default is also zero, fall back on EAL default */
2087                 if (nb_rx_desc == 0)
2088                         nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
2089         }
2090
2091         if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
2092                         nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
2093                         nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
2095                 RTE_ETHDEV_LOG(ERR,
2096                         "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n",
2097                         nb_rx_desc, dev_info.rx_desc_lim.nb_max,
2098                         dev_info.rx_desc_lim.nb_min,
2099                         dev_info.rx_desc_lim.nb_align);
2100                 return -EINVAL;
2101         }
2102
2103         if (dev->data->dev_started &&
2104                 !(dev_info.dev_capa &
2105                         RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
2106                 return -EBUSY;
2107
2108         if (dev->data->dev_started &&
2109                 (dev->data->rx_queue_state[rx_queue_id] !=
2110                         RTE_ETH_QUEUE_STATE_STOPPED))
2111                 return -EBUSY;
2112
2113         rxq = dev->data->rx_queues;
2114         if (rxq[rx_queue_id]) {
2115                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
2116                                         -ENOTSUP);
2117                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
2118                 rxq[rx_queue_id] = NULL;
2119         }
2120
2121         if (rx_conf == NULL)
2122                 rx_conf = &dev_info.default_rxconf;
2123
2124         local_conf = *rx_conf;
2125
2126         /*
2127          * If an offload has already been enabled in
2128          * rte_eth_dev_configure(), it is enabled on all queues,
2129          * so there is no need to enable it on this queue again.
2130          * The local_conf.offloads passed to the underlying PMD
2131          * carries only those offloads enabled on this queue alone
2132          * and not already enabled on all queues.
2133          */
2134         local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
2135
2136         /*
2137          * Offloads newly added for this queue are those not enabled in
2138          * rte_eth_dev_configure(), and they must be per-queue offloads.
2139          * A pure per-port offload can't be enabled on one queue while
2140          * disabled on another queue, nor can a pure per-port offload
2141          * be newly added on a queue if it wasn't already enabled in
2142          * rte_eth_dev_configure().
2143          */
2144         if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
2145              local_conf.offloads) {
2146                 RTE_ETHDEV_LOG(ERR,
2147                         "Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
2148                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2149                         port_id, rx_queue_id, local_conf.offloads,
2150                         dev_info.rx_queue_offload_capa,
2151                         __func__);
2152                 return -EINVAL;
2153         }
2154
2155         /*
2156          * If LRO is enabled, check that the maximum aggregated packet
2157          * size is supported by the configured device.
2158          */
2159         if (local_conf.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
2160                 if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0)
2161                         dev->data->dev_conf.rxmode.max_lro_pkt_size =
2162                                 dev->data->dev_conf.rxmode.max_rx_pkt_len;
2163                 int ret = eth_dev_check_lro_pkt_size(port_id,
2164                                 dev->data->dev_conf.rxmode.max_lro_pkt_size,
2165                                 dev->data->dev_conf.rxmode.max_rx_pkt_len,
2166                                 dev_info.max_lro_pkt_size);
2167                 if (ret != 0)
2168                         return ret;
2169         }
2170
2171         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
2172                                               socket_id, &local_conf, mp);
2173         if (!ret) {
2174                 if (!dev->data->min_rx_buf_size ||
2175                     dev->data->min_rx_buf_size > mbp_buf_size)
2176                         dev->data->min_rx_buf_size = mbp_buf_size;
2177         }
2178
2179         rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp,
2180                 rx_conf, ret);
2181         return eth_err(port_id, ret);
2182 }
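/*
 * Conventional single-pool usage (a sketch; "mbuf_pool" is a placeholder
 * created with rte_pktmbuf_pool_create()): a NULL rx_conf selects
 * dev_info.default_rxconf, and zero descriptors select the driver (or
 * EAL fallback) ring size, as handled above.
 *
 *	ret = rte_eth_rx_queue_setup(port_id, 0, 0,
 *				     rte_eth_dev_socket_id(port_id),
 *				     NULL, mbuf_pool);
 */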
2183
2184 int
2185 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2186                                uint16_t nb_rx_desc,
2187                                const struct rte_eth_hairpin_conf *conf)
2188 {
2189         int ret;
2190         struct rte_eth_dev *dev;
2191         struct rte_eth_hairpin_cap cap;
2192         void **rxq;
2193         int i;
2194         int count;
2195
2196         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2197         dev = &rte_eth_devices[port_id];
2198
2199         if (rx_queue_id >= dev->data->nb_rx_queues) {
2200                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
2201                 return -EINVAL;
2202         }
2203
2204         if (conf == NULL) {
2205                 RTE_ETHDEV_LOG(ERR,
2206                         "Cannot setup ethdev port %u Rx hairpin queue from NULL config\n",
2207                         port_id);
2208                 return -EINVAL;
2209         }
2210
2211         ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2212         if (ret != 0)
2213                 return ret;
2214         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_hairpin_queue_setup,
2215                                 -ENOTSUP);
2216         /* If nb_rx_desc is zero, use the max number of descriptors from the driver. */
2217         if (nb_rx_desc == 0)
2218                 nb_rx_desc = cap.max_nb_desc;
2219         if (nb_rx_desc > cap.max_nb_desc) {
2220                 RTE_ETHDEV_LOG(ERR,
2221                         "Invalid value for nb_rx_desc(=%hu), should be: <= %hu",
2222                         nb_rx_desc, cap.max_nb_desc);
2223                 return -EINVAL;
2224         }
2225         if (conf->peer_count > cap.max_rx_2_tx) {
2226                 RTE_ETHDEV_LOG(ERR,
2227                         "Invalid value for number of peers for Rx queue(=%u), should be: <= %hu",
2228                         conf->peer_count, cap.max_rx_2_tx);
2229                 return -EINVAL;
2230         }
2231         if (conf->peer_count == 0) {
2232                 RTE_ETHDEV_LOG(ERR,
2233                         "Invalid value for number of peers for Rx queue(=%u), should be: > 0",
2234                         conf->peer_count);
2235                 return -EINVAL;
2236         }
2237         for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
2238              cap.max_nb_queues != UINT16_MAX; i++) {
2239                 if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
2240                         count++;
2241         }
2242         if (count > cap.max_nb_queues) {
2243                 RTE_ETHDEV_LOG(ERR, "To many Rx hairpin queues max is %d",
2244                 cap.max_nb_queues);
2245                 return -EINVAL;
2246         }
2247         if (dev->data->dev_started)
2248                 return -EBUSY;
2249         rxq = dev->data->rx_queues;
2250         if (rxq[rx_queue_id] != NULL) {
2251                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
2252                                         -ENOTSUP);
2253                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
2254                 rxq[rx_queue_id] = NULL;
2255         }
2256         ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
2257                                                       nb_rx_desc, conf);
2258         if (ret == 0)
2259                 dev->data->rx_queue_state[rx_queue_id] =
2260                         RTE_ETH_QUEUE_STATE_HAIRPIN;
2261         return eth_err(port_id, ret);
2262 }
2263
2264 int
2265 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2266                        uint16_t nb_tx_desc, unsigned int socket_id,
2267                        const struct rte_eth_txconf *tx_conf)
2268 {
2269         struct rte_eth_dev *dev;
2270         struct rte_eth_dev_info dev_info;
2271         struct rte_eth_txconf local_conf;
2272         void **txq;
2273         int ret;
2274
2275         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2276         dev = &rte_eth_devices[port_id];
2277
2278         if (tx_queue_id >= dev->data->nb_tx_queues) {
2279                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
2280                 return -EINVAL;
2281         }
2282
2283         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
2284
2285         ret = rte_eth_dev_info_get(port_id, &dev_info);
2286         if (ret != 0)
2287                 return ret;
2288
2289         /* Use default specified by driver, if nb_tx_desc is zero */
2290         if (nb_tx_desc == 0) {
2291                 nb_tx_desc = dev_info.default_txportconf.ring_size;
2292                 /* If driver default is zero, fall back on EAL default */
2293                 if (nb_tx_desc == 0)
2294                         nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
2295         }
2296         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
2297             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
2298             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
2299                 RTE_ETHDEV_LOG(ERR,
2300                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n",
2301                         nb_tx_desc, dev_info.tx_desc_lim.nb_max,
2302                         dev_info.tx_desc_lim.nb_min,
2303                         dev_info.tx_desc_lim.nb_align);
2304                 return -EINVAL;
2305         }
2306
2307         if (dev->data->dev_started &&
2308                 !(dev_info.dev_capa &
2309                         RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
2310                 return -EBUSY;
2311
2312         if (dev->data->dev_started &&
2313                 (dev->data->tx_queue_state[tx_queue_id] !=
2314                         RTE_ETH_QUEUE_STATE_STOPPED))
2315                 return -EBUSY;
2316
2317         txq = dev->data->tx_queues;
2318         if (txq[tx_queue_id]) {
2319                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
2320                                         -ENOTSUP);
2321                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
2322                 txq[tx_queue_id] = NULL;
2323         }
2324
2325         if (tx_conf == NULL)
2326                 tx_conf = &dev_info.default_txconf;
2327
2328         local_conf = *tx_conf;
2329
2330         /*
2331          * If an offload has already been enabled in
2332          * rte_eth_dev_configure(), it is enabled on all queues,
2333          * so there is no need to enable it on this queue again.
2334          * The local_conf.offloads passed to the underlying PMD
2335          * carries only those offloads enabled on this queue alone
2336          * and not already enabled on all queues.
2337          */
2338         local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
2339
2340         /*
2341          * Offloads newly added for this queue are those not enabled in
2342          * rte_eth_dev_configure(), and they must be per-queue offloads.
2343          * A pure per-port offload can't be enabled on one queue while
2344          * disabled on another queue, nor can a pure per-port offload
2345          * be newly added on a queue if it wasn't already enabled in
2346          * rte_eth_dev_configure().
2347          */
2348         if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
2349              local_conf.offloads) {
2350                 RTE_ETHDEV_LOG(ERR,
2351                         "Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
2352                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2353                         port_id, tx_queue_id, local_conf.offloads,
2354                         dev_info.tx_queue_offload_capa,
2355                         __func__);
2356                 return -EINVAL;
2357         }
2358
2359         rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf);
2360         return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
2361                        tx_queue_id, nb_tx_desc, socket_id, &local_conf));
2362 }
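/*
 * Matching Tx setup sketch (illustrative): as on the Rx side, a NULL
 * tx_conf falls back to dev_info.default_txconf and zero descriptors to
 * the driver/EAL default ring size.
 *
 *	ret = rte_eth_tx_queue_setup(port_id, 0, 0,
 *				     rte_eth_dev_socket_id(port_id), NULL);
 */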
2363
2364 int
2365 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2366                                uint16_t nb_tx_desc,
2367                                const struct rte_eth_hairpin_conf *conf)
2368 {
2369         struct rte_eth_dev *dev;
2370         struct rte_eth_hairpin_cap cap;
2371         void **txq;
2372         int i;
2373         int count;
2374         int ret;
2375
2376         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2377         dev = &rte_eth_devices[port_id];
2378
2379         if (tx_queue_id >= dev->data->nb_tx_queues) {
2380                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
2381                 return -EINVAL;
2382         }
2383
2384         if (conf == NULL) {
2385                 RTE_ETHDEV_LOG(ERR,
2386                         "Cannot setup ethdev port %u Tx hairpin queue from NULL config\n",
2387                         port_id);
2388                 return -EINVAL;
2389         }
2390
2391         ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2392         if (ret != 0)
2393                 return ret;
2394         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup,
2395                                 -ENOTSUP);
2396         /* If nb_tx_desc is zero, use the max number of descriptors from the driver. */
2397         if (nb_tx_desc == 0)
2398                 nb_tx_desc = cap.max_nb_desc;
2399         if (nb_tx_desc > cap.max_nb_desc) {
2400                 RTE_ETHDEV_LOG(ERR,
2401                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu",
2402                         nb_tx_desc, cap.max_nb_desc);
2403                 return -EINVAL;
2404         }
2405         if (conf->peer_count > cap.max_tx_2_rx) {
2406                 RTE_ETHDEV_LOG(ERR,
2407                         "Invalid value for number of peers for Tx queue(=%u), should be: <= %hu",
2408                         conf->peer_count, cap.max_tx_2_rx);
2409                 return -EINVAL;
2410         }
2411         if (conf->peer_count == 0) {
2412                 RTE_ETHDEV_LOG(ERR,
2413                         "Invalid value for number of peers for Tx queue(=%u), should be: > 0",
2414                         conf->peer_count);
2415                 return -EINVAL;
2416         }
2417         for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
2418              cap.max_nb_queues != UINT16_MAX; i++) {
2419                 if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
2420                         count++;
2421         }
2422         if (count > cap.max_nb_queues) {
2423                 RTE_ETHDEV_LOG(ERR, "To many Tx hairpin queues max is %d",
2424                 cap.max_nb_queues);
2425                 return -EINVAL;
2426         }
2427         if (dev->data->dev_started)
2428                 return -EBUSY;
2429         txq = dev->data->tx_queues;
2430         if (txq[tx_queue_id] != NULL) {
2431                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
2432                                         -ENOTSUP);
2433                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
2434                 txq[tx_queue_id] = NULL;
2435         }
2436         ret = (*dev->dev_ops->tx_hairpin_queue_setup)
2437                 (dev, tx_queue_id, nb_tx_desc, conf);
2438         if (ret == 0)
2439                 dev->data->tx_queue_state[tx_queue_id] =
2440                         RTE_ETH_QUEUE_STATE_HAIRPIN;
2441         return eth_err(port_id, ret);
2442 }
2443
2444 int
2445 rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
2446 {
2447         struct rte_eth_dev *dev;
2448         int ret;
2449
2450         RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2451         dev = &rte_eth_devices[tx_port];
2452
2453         if (dev->data->dev_started == 0) {
2454                 RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port);
2455                 return -EBUSY;
2456         }
2457
2458         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_bind, -ENOTSUP);
2459         ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port);
2460         if (ret != 0)
2461                 RTE_ETHDEV_LOG(ERR, "Failed to bind hairpin Tx %d"
2462                                " to Rx %d (%d - all ports)\n",
2463                                tx_port, rx_port, RTE_MAX_ETHPORTS);
2464
2465         return ret;
2466 }
2467
2468 int
2469 rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
2470 {
2471         struct rte_eth_dev *dev;
2472         int ret;
2473
2474         RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2475         dev = &rte_eth_devices[tx_port];
2476
2477         if (dev->data->dev_started == 0) {
2478                 RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port);
2479                 return -EBUSY;
2480         }
2481
2482         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_unbind, -ENOTSUP);
2483         ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port);
2484         if (ret != 0)
2485                 RTE_ETHDEV_LOG(ERR, "Failed to unbind hairpin Tx %d"
2486                                " from Rx %d (%d - all ports)\n",
2487                                tx_port, rx_port, RTE_MAX_ETHPORTS);
2488
2489         return ret;
2490 }
2491
2492 int
2493 rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
2494                                size_t len, uint32_t direction)
2495 {
2496         struct rte_eth_dev *dev;
2497         int ret;
2498
2499         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2500         dev = &rte_eth_devices[port_id];
2501
2502         if (peer_ports == NULL) {
2503                 RTE_ETHDEV_LOG(ERR,
2504                         "Cannot get ethdev port %u hairpin peer ports to NULL\n",
2505                         port_id);
2506                 return -EINVAL;
2507         }
2508
2509         if (len == 0) {
2510                 RTE_ETHDEV_LOG(ERR,
2511                         "Cannot get ethdev port %u hairpin peer ports to array with zero size\n",
2512                         port_id);
2513                 return -EINVAL;
2514         }
2515
2516         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_get_peer_ports,
2517                                 -ENOTSUP);
2518
2519         ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports,
2520                                                       len, direction);
2521         if (ret < 0)
2522                 RTE_ETHDEV_LOG(ERR, "Failed to get %d hairpin peer %s ports\n",
2523                                port_id, direction ? "Rx" : "Tx");
2524
2525         return ret;
2526 }
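/*
 * Two-port hairpin sketch (illustrative; "rx_port", "tx_port" and the
 * queue ids are placeholders). With manual_bind and tx_explicit set, the
 * peer queues are wired by an explicit rte_eth_hairpin_bind() once both
 * ports are started:
 *
 *	struct rte_eth_hairpin_conf conf = {
 *		.peer_count = 1,
 *		.manual_bind = 1,
 *		.tx_explicit = 1,
 *	};
 *	conf.peers[0].port = tx_port;
 *	conf.peers[0].queue = 0;
 *	ret = rte_eth_rx_hairpin_queue_setup(rx_port, 0, 0, &conf);
 *	conf.peers[0].port = rx_port;
 *	ret = rte_eth_tx_hairpin_queue_setup(tx_port, 0, 0, &conf);
 *	(start both ports, then)
 *	ret = rte_eth_hairpin_bind(tx_port, rx_port);
 */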
2527
2528 void
2529 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
2530                 void *userdata __rte_unused)
2531 {
2532         rte_pktmbuf_free_bulk(pkts, unsent);
2533 }
2534
2535 void
2536 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
2537                 void *userdata)
2538 {
2539         uint64_t *count = userdata;
2540
2541         rte_pktmbuf_free_bulk(pkts, unsent);
2542         *count += unsent;
2543 }
2544
2545 int
2546 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
2547                 buffer_tx_error_fn cbfn, void *userdata)
2548 {
2549         if (buffer == NULL) {
2550                 RTE_ETHDEV_LOG(ERR,
2551                         "Cannot set Tx buffer error callback to NULL buffer\n");
2552                 return -EINVAL;
2553         }
2554
2555         buffer->error_callback = cbfn;
2556         buffer->error_userdata = userdata;
2557         return 0;
2558 }
2559
2560 int
2561 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
2562 {
2563         int ret = 0;
2564
2565         if (buffer == NULL) {
2566                 RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL buffer\n");
2567                 return -EINVAL;
2568         }
2569
2570         buffer->size = size;
2571         if (buffer->error_callback == NULL) {
2572                 ret = rte_eth_tx_buffer_set_err_callback(
2573                         buffer, rte_eth_tx_buffer_drop_callback, NULL);
2574         }
2575
2576         return ret;
2577 }
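/*
 * Typical buffered-Tx usage (a sketch; "BURST" and "pkt" are
 * placeholders): size the allocation with RTE_ETH_TX_BUFFER_SIZE() to
 * cover the flexible array of mbuf pointers, and count drops through the
 * callback defined above.
 *
 *	static uint64_t drops;
 *	struct rte_eth_dev_tx_buffer *buf = rte_zmalloc(NULL,
 *			RTE_ETH_TX_BUFFER_SIZE(BURST), 0);
 *	rte_eth_tx_buffer_init(buf, BURST);
 *	rte_eth_tx_buffer_set_err_callback(buf,
 *			rte_eth_tx_buffer_count_callback, &drops);
 *	rte_eth_tx_buffer(port_id, 0, buf, pkt);      (per packet)
 *	rte_eth_tx_buffer_flush(port_id, 0, buf);     (end of burst)
 */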
2578
2579 int
2580 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
2581 {
2582         struct rte_eth_dev *dev;
2583         int ret;
2584
2585         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2586         dev = &rte_eth_devices[port_id];
2587
2588         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
2589
2590         /* Call driver to free pending mbufs. */
2591         ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
2592                                                free_cnt);
2593         return eth_err(port_id, ret);
2594 }
2595
2596 int
2597 rte_eth_promiscuous_enable(uint16_t port_id)
2598 {
2599         struct rte_eth_dev *dev;
2600         int diag = 0;
2601
2602         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2603         dev = &rte_eth_devices[port_id];
2604
2605         if (dev->data->promiscuous == 1)
2606                 return 0;
2607
2608         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP);
2609
2610         diag = (*dev->dev_ops->promiscuous_enable)(dev);
2611         dev->data->promiscuous = (diag == 0) ? 1 : 0;
2612
2613         return eth_err(port_id, diag);
2614 }
2615
2616 int
2617 rte_eth_promiscuous_disable(uint16_t port_id)
2618 {
2619         struct rte_eth_dev *dev;
2620         int diag = 0;
2621
2622         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2623         dev = &rte_eth_devices[port_id];
2624
2625         if (dev->data->promiscuous == 0)
2626                 return 0;
2627
2628         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP);
2629
2630         dev->data->promiscuous = 0;
2631         diag = (*dev->dev_ops->promiscuous_disable)(dev);
2632         if (diag != 0)
2633                 dev->data->promiscuous = 1;
2634
2635         return eth_err(port_id, diag);
2636 }
2637
2638 int
2639 rte_eth_promiscuous_get(uint16_t port_id)
2640 {
2641         struct rte_eth_dev *dev;
2642
2643         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2644         dev = &rte_eth_devices[port_id];
2645
2646         return dev->data->promiscuous;
2647 }
2648
2649 int
2650 rte_eth_allmulticast_enable(uint16_t port_id)
2651 {
2652         struct rte_eth_dev *dev;
2653         int diag;
2654
2655         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2656         dev = &rte_eth_devices[port_id];
2657
2658         if (dev->data->all_multicast == 1)
2659                 return 0;
2660
2661         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP);
2662         diag = (*dev->dev_ops->allmulticast_enable)(dev);
2663         dev->data->all_multicast = (diag == 0) ? 1 : 0;
2664
2665         return eth_err(port_id, diag);
2666 }
2667
2668 int
2669 rte_eth_allmulticast_disable(uint16_t port_id)
2670 {
2671         struct rte_eth_dev *dev;
2672         int diag;
2673
2674         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2675         dev = &rte_eth_devices[port_id];
2676
2677         if (dev->data->all_multicast == 0)
2678                 return 0;
2679
2680         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP);
2681         dev->data->all_multicast = 0;
2682         diag = (*dev->dev_ops->allmulticast_disable)(dev);
2683         if (diag != 0)
2684                 dev->data->all_multicast = 1;
2685
2686         return eth_err(port_id, diag);
2687 }
2688
2689 int
2690 rte_eth_allmulticast_get(uint16_t port_id)
2691 {
2692         struct rte_eth_dev *dev;
2693
2694         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2695         dev = &rte_eth_devices[port_id];
2696
2697         return dev->data->all_multicast;
2698 }
2699
2700 int
2701 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
2702 {
2703         struct rte_eth_dev *dev;
2704
2705         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2706         dev = &rte_eth_devices[port_id];
2707
2708         if (eth_link == NULL) {
2709                 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n",
2710                         port_id);
2711                 return -EINVAL;
2712         }
2713
2714         if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started)
2715                 rte_eth_linkstatus_get(dev, eth_link);
2716         else {
2717                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2718                 (*dev->dev_ops->link_update)(dev, 1);
2719                 *eth_link = dev->data->dev_link;
2720         }
2721
2722         return 0;
2723 }
2724
2725 int
2726 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
2727 {
2728         struct rte_eth_dev *dev;
2729
2730         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2731         dev = &rte_eth_devices[port_id];
2732
2733         if (eth_link == NULL) {
2734                 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n",
2735                         port_id);
2736                 return -EINVAL;
2737         }
2738
2739         if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started)
2740                 rte_eth_linkstatus_get(dev, eth_link);
2741         else {
2742                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2743                 (*dev->dev_ops->link_update)(dev, 0);
2744                 *eth_link = dev->data->dev_link;
2745         }
2746
2747         return 0;
2748 }
2749
2750 const char *
2751 rte_eth_link_speed_to_str(uint32_t link_speed)
2752 {
2753         switch (link_speed) {
2754         case ETH_SPEED_NUM_NONE: return "None";
2755         case ETH_SPEED_NUM_10M:  return "10 Mbps";
2756         case ETH_SPEED_NUM_100M: return "100 Mbps";
2757         case ETH_SPEED_NUM_1G:   return "1 Gbps";
2758         case ETH_SPEED_NUM_2_5G: return "2.5 Gbps";
2759         case ETH_SPEED_NUM_5G:   return "5 Gbps";
2760         case ETH_SPEED_NUM_10G:  return "10 Gbps";
2761         case ETH_SPEED_NUM_20G:  return "20 Gbps";
2762         case ETH_SPEED_NUM_25G:  return "25 Gbps";
2763         case ETH_SPEED_NUM_40G:  return "40 Gbps";
2764         case ETH_SPEED_NUM_50G:  return "50 Gbps";
2765         case ETH_SPEED_NUM_56G:  return "56 Gbps";
2766         case ETH_SPEED_NUM_100G: return "100 Gbps";
2767         case ETH_SPEED_NUM_200G: return "200 Gbps";
2768         case ETH_SPEED_NUM_UNKNOWN: return "Unknown";
2769         default: return "Invalid";
2770         }
2771 }
2772
2773 int
2774 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
2775 {
2776         if (str == NULL) {
2777                 RTE_ETHDEV_LOG(ERR, "Cannot convert link to NULL string\n");
2778                 return -EINVAL;
2779         }
2780
2781         if (len == 0) {
2782                 RTE_ETHDEV_LOG(ERR,
2783                         "Cannot convert link to string with zero size\n");
2784                 return -EINVAL;
2785         }
2786
2787         if (eth_link == NULL) {
2788                 RTE_ETHDEV_LOG(ERR, "Cannot convert to string from NULL link\n");
2789                 return -EINVAL;
2790         }
2791
2792         if (eth_link->link_status == ETH_LINK_DOWN)
2793                 return snprintf(str, len, "Link down");
2794         else
2795                 return snprintf(str, len, "Link up at %s %s %s",
2796                         rte_eth_link_speed_to_str(eth_link->link_speed),
2797                         (eth_link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
2798                         "FDX" : "HDX",
2799                         (eth_link->link_autoneg == ETH_LINK_AUTONEG) ?
2800                         "Autoneg" : "Fixed");
2801 }
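/*
 * Printing sketch (illustrative): query the link without waiting and
 * format it with the helper above.
 *
 *	struct rte_eth_link link;
 *	char text[RTE_ETH_LINK_MAX_STR_LEN];
 *	if (rte_eth_link_get_nowait(port_id, &link) == 0) {
 *		rte_eth_link_to_str(text, sizeof(text), &link);
 *		printf("Port %u: %s\n", port_id, text);
 *	}
 */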
2802
2803 int
2804 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
2805 {
2806         struct rte_eth_dev *dev;
2807
2808         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2809         dev = &rte_eth_devices[port_id];
2810
2811         if (stats == NULL) {
2812                 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u stats to NULL\n",
2813                         port_id);
2814                 return -EINVAL;
2815         }
2816
2817         memset(stats, 0, sizeof(*stats));
2818
2819         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
2820         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
2821         return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
2822 }
2823
2824 int
2825 rte_eth_stats_reset(uint16_t port_id)
2826 {
2827         struct rte_eth_dev *dev;
2828         int ret;
2829
2830         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2831         dev = &rte_eth_devices[port_id];
2832
2833         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
2834         ret = (*dev->dev_ops->stats_reset)(dev);
2835         if (ret != 0)
2836                 return eth_err(port_id, ret);
2837
2838         dev->data->rx_mbuf_alloc_failed = 0;
2839
2840         return 0;
2841 }
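/*
 * Basic-stats polling sketch (illustrative):
 *
 *	struct rte_eth_stats st;
 *	if (rte_eth_stats_get(port_id, &st) == 0)
 *		printf("rx=%"PRIu64" tx=%"PRIu64" missed=%"PRIu64"\n",
 *		       st.ipackets, st.opackets, st.imissed);
 */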
2842
2843 static inline int
2844 eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev)
2845 {
2846         uint16_t nb_rxqs, nb_txqs;
2847         int count;
2848
2849         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2850         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2851
2852         count = RTE_NB_STATS;
2853         if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) {
2854                 count += nb_rxqs * RTE_NB_RXQ_STATS;
2855                 count += nb_txqs * RTE_NB_TXQ_STATS;
2856         }
2857
2858         return count;
2859 }
2860
2861 static int
2862 eth_dev_get_xstats_count(uint16_t port_id)
2863 {
2864         struct rte_eth_dev *dev;
2865         int count;
2866
2867         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2868         dev = &rte_eth_devices[port_id];
2869         if (dev->dev_ops->xstats_get_names != NULL) {
2870                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
2871                 if (count < 0)
2872                         return eth_err(port_id, count);
2873         } else
2874                 count = 0;
2875 
2877         count += eth_dev_get_xstats_basic_count(dev);
2878
2879         return count;
2880 }
2881
2882 int
2883 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
2884                 uint64_t *id)
2885 {
2886         int cnt_xstats, idx_xstat;
2887
2888         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2889
2890         if (xstat_name == NULL) {
2891                 RTE_ETHDEV_LOG(ERR,
2892                         "Cannot get ethdev port %u xstats ID from NULL xstat name\n",
2893                         port_id);
2894                 return -ENOMEM;
2895         }
2896
2897         if (id == NULL) {
2898                 RTE_ETHDEV_LOG(ERR,
2899                         "Cannot get ethdev port %u xstats ID to NULL\n",
2900                         port_id);
2901                 return -ENOMEM;
2902         }
2903
2904         /* Get count */
2905         cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
2906         if (cnt_xstats < 0) {
2907                 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n");
2908                 return -ENODEV;
2909         }
2910
2911         /* Get id-name lookup table */
2912         struct rte_eth_xstat_name xstats_names[cnt_xstats];
2913
2914         if (cnt_xstats != rte_eth_xstats_get_names_by_id(
2915                         port_id, xstats_names, cnt_xstats, NULL)) {
2916                 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n");
2917                 return -1;
2918         }
2919
2920         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
2921                 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
2922                         *id = idx_xstat;
2923                         return 0;
2924                 }
2925         }
2926
2927         return -EINVAL;
2928 }
2929
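/*
 * Illustrative sketch, not part of the ethdev API: resolve one xstat
 * name to its ID with the function above, then read the single value by
 * ID. The helper name is an assumption for the example only.
 */
static __rte_unused int
example_xstat_by_name(uint16_t port_id, const char *name, uint64_t *value)
{
        uint64_t id;
        int ret;

        ret = rte_eth_xstats_get_id_by_name(port_id, name, &id);
        if (ret != 0)
                return ret;

        /* rte_eth_xstats_get_by_id() returns the number of values filled */
        ret = rte_eth_xstats_get_by_id(port_id, &id, value, 1);
        return ret == 1 ? 0 : -EIO;
}
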
2930 /* retrieve basic stats names */
2931 static int
2932 eth_basic_stats_get_names(struct rte_eth_dev *dev,
2933         struct rte_eth_xstat_name *xstats_names)
2934 {
2935         int cnt_used_entries = 0;
2936         uint32_t idx, id_queue;
2937         uint16_t num_q;
2938
2939         for (idx = 0; idx < RTE_NB_STATS; idx++) {
2940                 strlcpy(xstats_names[cnt_used_entries].name,
2941                         eth_dev_stats_strings[idx].name,
2942                         sizeof(xstats_names[0].name));
2943                 cnt_used_entries++;
2944         }
2945
2946         if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
2947                 return cnt_used_entries;
2948
2949         num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2950         for (id_queue = 0; id_queue < num_q; id_queue++) {
2951                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
2952                         snprintf(xstats_names[cnt_used_entries].name,
2953                                 sizeof(xstats_names[0].name),
2954                                 "rx_q%u_%s",
2955                                 id_queue, eth_dev_rxq_stats_strings[idx].name);
2956                         cnt_used_entries++;
2957                 }
2958
2959         }
2960         num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2961         for (id_queue = 0; id_queue < num_q; id_queue++) {
2962                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
2963                         snprintf(xstats_names[cnt_used_entries].name,
2964                                 sizeof(xstats_names[0].name),
2965                                 "tx_q%u_%s",
2966                                 id_queue, eth_dev_txq_stats_strings[idx].name);
2967                         cnt_used_entries++;
2968                 }
2969         }
2970         return cnt_used_entries;
2971 }
2972
2973 /* retrieve ethdev extended statistics names */
2974 int
2975 rte_eth_xstats_get_names_by_id(uint16_t port_id,
2976         struct rte_eth_xstat_name *xstats_names, unsigned int size,
2977         uint64_t *ids)
2978 {
2979         struct rte_eth_xstat_name *xstats_names_copy;
2980         unsigned int no_basic_stat_requested = 1;
2981         unsigned int no_ext_stat_requested = 1;
2982         unsigned int expected_entries;
2983         unsigned int basic_count;
2984         struct rte_eth_dev *dev;
2985         unsigned int i;
2986         int ret;
2987
2988         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2989         dev = &rte_eth_devices[port_id];
2990
2991         basic_count = eth_dev_get_xstats_basic_count(dev);
2992         ret = eth_dev_get_xstats_count(port_id);
2993         if (ret < 0)
2994                 return ret;
2995         expected_entries = (unsigned int)ret;
2996
2997         /* Return max number of stats if no ids given */
2998         if (!ids) {
2999                 if (!xstats_names || size < expected_entries)
3000                         return expected_entries;
3001         }
3004
3005         if (ids && !xstats_names)
3006                 return -EINVAL;
3007
3008         if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
3009                 uint64_t ids_copy[size];
3010
3011                 for (i = 0; i < size; i++) {
3012                         if (ids[i] < basic_count) {
3013                                 no_basic_stat_requested = 0;
3014                                 break;
3015                         }
3016
3017                         /*
3018                          * Convert ids to the xstats ids the PMD knows;
3019                          * user-visible ids cover basic + extended stats.
3020                          */
3021                         ids_copy[i] = ids[i] - basic_count;
3022                 }
3023
3024                 if (no_basic_stat_requested)
3025                         return (*dev->dev_ops->xstats_get_names_by_id)(dev,
3026                                         ids_copy, xstats_names, size);
3027         }
3028
3029         /* Retrieve all stats */
3030         if (!ids) {
3031                 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
3032                                 expected_entries);
3033                 if (num_stats < 0 || num_stats > (int)expected_entries)
3034                         return num_stats;
3035                 else
3036                         return expected_entries;
3037         }
3038
3039         xstats_names_copy = calloc(expected_entries,
3040                 sizeof(struct rte_eth_xstat_name));
3041
3042         if (!xstats_names_copy) {
3043                 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n");
3044                 return -ENOMEM;
3045         }
3046
3047         if (ids) {
3048                 for (i = 0; i < size; i++) {
3049                         if (ids[i] >= basic_count) {
3050                                 no_ext_stat_requested = 0;
3051                                 break;
3052                         }
3053                 }
3054         }
3055
3056         /* Fill xstats_names_copy structure */
3057         if (ids && no_ext_stat_requested) {
3058                 eth_basic_stats_get_names(dev, xstats_names_copy);
3059         } else {
3060                 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
3061                         expected_entries);
3062                 if (ret < 0) {
3063                         free(xstats_names_copy);
3064                         return ret;
3065                 }
3066         }
3067
3068         /* Filter stats */
3069         for (i = 0; i < size; i++) {
3070                 if (ids[i] >= expected_entries) {
3071                         RTE_ETHDEV_LOG(ERR, "Invalid xstat ID\n");
3072                         free(xstats_names_copy);
3073                         return -1;
3074                 }
3075                 xstats_names[i] = xstats_names_copy[ids[i]];
3076         }
3077
3078         free(xstats_names_copy);
3079         return size;
3080 }
3081
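/*
 * Illustrative sketch, not part of the ethdev API: enumerate every
 * xstat name using the ids == NULL convention implemented above (a
 * first call with a NULL names array returns the required size). The
 * function name is an assumption for the example only.
 */
static __rte_unused int
example_dump_xstat_names(uint16_t port_id)
{
        struct rte_eth_xstat_name *names;
        int n, i;

        n = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
        if (n <= 0)
                return n;

        names = calloc(n, sizeof(*names));
        if (names == NULL)
                return -ENOMEM;

        n = rte_eth_xstats_get_names_by_id(port_id, names, n, NULL);
        for (i = 0; i < n; i++)
                RTE_ETHDEV_LOG(INFO, "xstat[%d]: %s\n", i, names[i].name);

        free(names);
        return n;
}
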
3082 int
3083 rte_eth_xstats_get_names(uint16_t port_id,
3084         struct rte_eth_xstat_name *xstats_names,
3085         unsigned int size)
3086 {
3087         struct rte_eth_dev *dev;
3088         int cnt_used_entries;
3089         int cnt_expected_entries;
3090         int cnt_driver_entries;
3091
3092         cnt_expected_entries = eth_dev_get_xstats_count(port_id);
3093         if (xstats_names == NULL || cnt_expected_entries < 0 ||
3094                         (int)size < cnt_expected_entries)
3095                 return cnt_expected_entries;
3096
3097         /* port_id checked in eth_dev_get_xstats_count() */
3098         dev = &rte_eth_devices[port_id];
3099
3100         cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names);
3101
3102         if (dev->dev_ops->xstats_get_names != NULL) {
3103                 /* If there are any driver-specific xstats, append them
3104                  * to the end of the list.
3105                  */
3106                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
3107                         dev,
3108                         xstats_names + cnt_used_entries,
3109                         size - cnt_used_entries);
3110                 if (cnt_driver_entries < 0)
3111                         return eth_err(port_id, cnt_driver_entries);
3112                 cnt_used_entries += cnt_driver_entries;
3113         }
3114
3115         return cnt_used_entries;
3116 }
3117
3119 static int
3120 eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
3121 {
3122         struct rte_eth_dev *dev;
3123         struct rte_eth_stats eth_stats;
3124         unsigned int count = 0, i, q;
3125         uint64_t val, *stats_ptr;
3126         uint16_t nb_rxqs, nb_txqs;
3127         int ret;
3128
3129         ret = rte_eth_stats_get(port_id, &eth_stats);
3130         if (ret < 0)
3131                 return ret;
3132
3133         dev = &rte_eth_devices[port_id];
3134
3135         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3136         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3137
3138         /* global stats */
3139         for (i = 0; i < RTE_NB_STATS; i++) {
3140                 stats_ptr = RTE_PTR_ADD(&eth_stats,
3141                                         eth_dev_stats_strings[i].offset);
3142                 val = *stats_ptr;
3143                 xstats[count++].value = val;
3144         }
3145
3146         if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
3147                 return count;
3148
3149         /* per-rxq stats */
3150         for (q = 0; q < nb_rxqs; q++) {
3151                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
3152                         stats_ptr = RTE_PTR_ADD(&eth_stats,
3153                                         eth_dev_rxq_stats_strings[i].offset +
3154                                         q * sizeof(uint64_t));
3155                         val = *stats_ptr;
3156                         xstats[count++].value = val;
3157                 }
3158         }
3159
3160         /* per-txq stats */
3161         for (q = 0; q < nb_txqs; q++) {
3162                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
3163                         stats_ptr = RTE_PTR_ADD(&eth_stats,
3164                                         eth_dev_txq_stats_strings[i].offset +
3165                                         q * sizeof(uint64_t));
3166                         val = *stats_ptr;
3167                         xstats[count++].value = val;
3168                 }
3169         }
3170         return count;
3171 }
3172
3173 /* retrieve ethdev extended statistics */
3174 int
3175 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
3176                          uint64_t *values, unsigned int size)
3177 {
3178         unsigned int no_basic_stat_requested = 1;
3179         unsigned int no_ext_stat_requested = 1;
3180         unsigned int num_xstats_filled;
3181         unsigned int basic_count;
3182         uint16_t expected_entries;
3183         struct rte_eth_dev *dev;
3184         unsigned int i;
3185         int ret;
3186
3187         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3188         dev = &rte_eth_devices[port_id];
3189
3190         ret = eth_dev_get_xstats_count(port_id);
3191         if (ret < 0)
3192                 return ret;
3193         expected_entries = (uint16_t)ret;
3194         struct rte_eth_xstat xstats[expected_entries];
3195         basic_count = eth_dev_get_xstats_basic_count(dev);
3196
3197         /* Return max number of stats if no ids given */
3198         if (!ids) {
3199                 if (!values || size < expected_entries)
3200                         return expected_entries;
3201         }
3204
3205         if (ids && !values)
3206                 return -EINVAL;
3207
3208         if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
3210                 uint64_t ids_copy[size];
3211
3212                 for (i = 0; i < size; i++) {
3213                         if (ids[i] < basic_count) {
3214                                 no_basic_stat_requested = 0;
3215                                 break;
3216                         }
3217
3218                         /*
3219                          * Convert ids to the xstats ids the PMD knows;
3220                          * user-visible ids cover basic + extended stats.
3221                          */
3222                         ids_copy[i] = ids[i] - basic_count;
3223                 }
3224
3225                 if (no_basic_stat_requested)
3226                         return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
3227                                         values, size);
3228         }
3229
3230         if (ids) {
3231                 for (i = 0; i < size; i++) {
3232                         if (ids[i] >= basic_count) {
3233                                 no_ext_stat_requested = 0;
3234                                 break;
3235                         }
3236                 }
3237         }
3238
3239         /* Fill the xstats structure */
3240         if (ids && no_ext_stat_requested)
3241                 ret = eth_basic_stats_get(port_id, xstats);
3242         else
3243                 ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
3244
3245         if (ret < 0)
3246                 return ret;
3247         num_xstats_filled = (unsigned int)ret;
3248
3249         /* Return all stats */
3250         if (!ids) {
3251                 for (i = 0; i < num_xstats_filled; i++)
3252                         values[i] = xstats[i].value;
3253                 return expected_entries;
3254         }
3255
3256         /* Filter stats */
3257         for (i = 0; i < size; i++) {
3258                 if (ids[i] >= expected_entries) {
3259                         RTE_ETHDEV_LOG(ERR, "Invalid xstat ID\n");
3260                         return -1;
3261                 }
3262                 values[i] = xstats[ids[i]].value;
3263         }
3264         return size;
3265 }
3266
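/*
 * Illustrative sketch, not part of the ethdev API: read a small, fixed
 * set of xstats by ID. IDs as seen here cover basic stats first, then
 * driver stats, matching the conversion above. The function name and
 * the chosen IDs are assumptions for the example only.
 */
static __rte_unused int
example_read_two_xstats(uint16_t port_id, uint64_t id_a, uint64_t id_b)
{
        uint64_t ids[2] = { id_a, id_b };
        uint64_t values[2];
        int ret;

        ret = rte_eth_xstats_get_by_id(port_id, ids, values, 2);
        if (ret != 2)
                return ret < 0 ? ret : -EIO;

        RTE_ETHDEV_LOG(INFO,
                "xstat %" PRIu64 "=%" PRIu64 ", %" PRIu64 "=%" PRIu64 "\n",
                ids[0], values[0], ids[1], values[1]);
        return 0;
}
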
3267 int
3268 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
3269         unsigned int n)
3270 {
3271         struct rte_eth_dev *dev;
3272         unsigned int count = 0, i;
3273         signed int xcount = 0;
3274         uint16_t nb_rxqs, nb_txqs;
3275         int ret;
3276
3277         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3278         dev = &rte_eth_devices[port_id];
3279
3280         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3281         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3282
3283         /* Return generic statistics */
3284         count = RTE_NB_STATS;
3285         if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS)
3286                 count += (nb_rxqs * RTE_NB_RXQ_STATS) + (nb_txqs * RTE_NB_TXQ_STATS);
3287
3288         /* implemented by the driver */
3289         if (dev->dev_ops->xstats_get != NULL) {
3290                 /* Retrieve the driver xstats and place them at the end
3291                  * of the xstats array.
3292                  */
3293                 xcount = (*dev->dev_ops->xstats_get)(dev,
3294                                      xstats ? xstats + count : NULL,
3295                                      (n > count) ? n - count : 0);
3296
3297                 if (xcount < 0)
3298                         return eth_err(port_id, xcount);
3299         }
3300
3301         if (n < count + xcount || xstats == NULL)
3302                 return count + xcount;
3303
3304         /* now fill the xstats structure */
3305         ret = eth_basic_stats_get(port_id, xstats);
3306         if (ret < 0)
3307                 return ret;
3308         count = ret;
3309
3310         for (i = 0; i < count; i++)
3311                 xstats[i].id = i;
3312         /* add an offset to driver-specific stats */
3313         for ( ; i < count + xcount; i++)
3314                 xstats[i].id += count;
3315
3316         return count + xcount;
3317 }
3318
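/*
 * Illustrative sketch, not part of the ethdev API: dump all xstats by
 * pairing rte_eth_xstats_get_names() with rte_eth_xstats_get(). Both
 * report the required count when called with a too-small array. The
 * function name is an assumption for the example only.
 */
static __rte_unused int
example_dump_all_xstats(uint16_t port_id)
{
        struct rte_eth_xstat_name *names;
        struct rte_eth_xstat *vals;
        int n, i;

        n = rte_eth_xstats_get(port_id, NULL, 0);
        if (n <= 0)
                return n;

        names = calloc(n, sizeof(*names));
        vals = calloc(n, sizeof(*vals));
        if (names == NULL || vals == NULL) {
                free(names);
                free(vals);
                return -ENOMEM;
        }

        if (rte_eth_xstats_get_names(port_id, names, n) == n &&
                        rte_eth_xstats_get(port_id, vals, n) == n) {
                /* each entry's id indexes into the name table */
                for (i = 0; i < n; i++)
                        RTE_ETHDEV_LOG(INFO, "%s: %" PRIu64 "\n",
                                names[vals[i].id].name, vals[i].value);
        }

        free(names);
        free(vals);
        return 0;
}
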
3319 /* reset ethdev extended statistics */
3320 int
3321 rte_eth_xstats_reset(uint16_t port_id)
3322 {
3323         struct rte_eth_dev *dev;
3324
3325         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3326         dev = &rte_eth_devices[port_id];
3327
3328         /* implemented by the driver */
3329         if (dev->dev_ops->xstats_reset != NULL)
3330                 return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev));
3331
3332         /* fallback to default */
3333         return rte_eth_stats_reset(port_id);
3334 }
3335
3336 static int
3337 eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id,
3338                 uint8_t stat_idx, uint8_t is_rx)
3339 {
3340         struct rte_eth_dev *dev;
3341
3342         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3343         dev = &rte_eth_devices[port_id];
3344
3345         if (is_rx && (queue_id >= dev->data->nb_rx_queues))
3346                 return -EINVAL;
3347
3348         if (!is_rx && (queue_id >= dev->data->nb_tx_queues))
3349                 return -EINVAL;
3350
3351         if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
3352                 return -EINVAL;
3353
3354         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
3355         return (*dev->dev_ops->queue_stats_mapping_set)(dev, queue_id, stat_idx, is_rx);
3356 }
3357
3358 int
3359 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
3360                 uint8_t stat_idx)
3361 {
3362         return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id,
3363                                                 tx_queue_id,
3364                                                 stat_idx, STAT_QMAP_TX));
3365 }
3366
3367 int
3368 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
3369                 uint8_t stat_idx)
3370 {
3371         return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id,
3372                                                 rx_queue_id,
3373                                                 stat_idx, STAT_QMAP_RX));
3374 }
3375
3376 int
3377 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
3378 {
3379         struct rte_eth_dev *dev;
3380
3381         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3382         dev = &rte_eth_devices[port_id];
3383
3384         if (fw_version == NULL && fw_size > 0) {
3385                 RTE_ETHDEV_LOG(ERR,
3386                         "Cannot get ethdev port %u FW version to NULL when string size is non-zero\n",
3387                         port_id);
3388                 return -EINVAL;
3389         }
3390
3391         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
3392         return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
3393                                                         fw_version, fw_size));
3394 }
3395
3396 int
3397 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
3398 {
3399         struct rte_eth_dev *dev;
3400         const struct rte_eth_desc_lim lim = {
3401                 .nb_max = UINT16_MAX,
3402                 .nb_min = 0,
3403                 .nb_align = 1,
3404                 .nb_seg_max = UINT16_MAX,
3405                 .nb_mtu_seg_max = UINT16_MAX,
3406         };
3407         int diag;
3408
3409         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3410         dev = &rte_eth_devices[port_id];
3411
3412         if (dev_info == NULL) {
3413                 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u info to NULL\n",
3414                         port_id);
3415                 return -EINVAL;
3416         }
3417
3418         /*
3419          * Init dev_info to safe defaults first, so that a caller which
3420          * ignores the return status never sees uninitialized contents.
3421          */
3422         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3423         dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
3424
3425         dev_info->rx_desc_lim = lim;
3426         dev_info->tx_desc_lim = lim;
3427         dev_info->device = dev->device;
3428         dev_info->min_mtu = RTE_ETHER_MIN_MTU;
3429         dev_info->max_mtu = UINT16_MAX;
3430
3431         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
3432         diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info);
3433         if (diag != 0) {
3434                 /* Cleanup already filled in device information */
3435                 memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3436                 return eth_err(port_id, diag);
3437         }
3438
3439         /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */
3440         dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues,
3441                         RTE_MAX_QUEUES_PER_PORT);
3442         dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues,
3443                         RTE_MAX_QUEUES_PER_PORT);
3444
3445         dev_info->driver_name = dev->device->driver->name;
3446         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
3447         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
3448
3449         dev_info->dev_flags = &dev->data->dev_flags;
3450
3451         return 0;
3452 }
3453
3454 int
3455 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3456                                  uint32_t *ptypes, int num)
3457 {
3458         int i, j;
3459         struct rte_eth_dev *dev;
3460         const uint32_t *all_ptypes;
3461
3462         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3463         dev = &rte_eth_devices[port_id];
3464
3465         if (ptypes == NULL && num > 0) {
3466                 RTE_ETHDEV_LOG(ERR,
3467                         "Cannot get ethdev port %u supported packet types to NULL when array size is non-zero\n",
3468                         port_id);
3469                 return -EINVAL;
3470         }
3471
3472         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
3473         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3474
3475         if (!all_ptypes)
3476                 return 0;
3477
3478         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
3479                 if (all_ptypes[i] & ptype_mask) {
3480                         if (j < num)
3481                                 ptypes[j] = all_ptypes[i];
3482                         j++;
3483                 }
3484
3485         return j;
3486 }
3487
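/*
 * Illustrative sketch, not part of the ethdev API: count how many L4
 * packet types the port can recognise. As implemented above, a query
 * with a NULL array and zero size simply returns the number of matching
 * ptypes. The function name is an assumption for the example only.
 */
static __rte_unused int
example_count_l4_ptypes(uint16_t port_id)
{
        return rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_L4_MASK,
                        NULL, 0);
}
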
3488 int
3489 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3490                                  uint32_t *set_ptypes, unsigned int num)
3491 {
3492         const uint32_t valid_ptype_masks[] = {
3493                 RTE_PTYPE_L2_MASK,
3494                 RTE_PTYPE_L3_MASK,
3495                 RTE_PTYPE_L4_MASK,
3496                 RTE_PTYPE_TUNNEL_MASK,
3497                 RTE_PTYPE_INNER_L2_MASK,
3498                 RTE_PTYPE_INNER_L3_MASK,
3499                 RTE_PTYPE_INNER_L4_MASK,
3500         };
3501         const uint32_t *all_ptypes;
3502         struct rte_eth_dev *dev;
3503         uint32_t unused_mask;
3504         unsigned int i, j;
3505         int ret;
3506
3507         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3508         dev = &rte_eth_devices[port_id];
3509
3510         if (num > 0 && set_ptypes == NULL) {
3511                 RTE_ETHDEV_LOG(ERR,
3512                         "Cannot get ethdev port %u set packet types to NULL when array size is non-zero\n",
3513                         port_id);
3514                 return -EINVAL;
3515         }
3516
3517         if (*dev->dev_ops->dev_supported_ptypes_get == NULL ||
3518                         *dev->dev_ops->dev_ptypes_set == NULL) {
3519                 ret = 0;
3520                 goto ptype_unknown;
3521         }
3522
3523         if (ptype_mask == 0) {
3524                 ret = (*dev->dev_ops->dev_ptypes_set)(dev,
3525                                 ptype_mask);
3526                 goto ptype_unknown;
3527         }
3528
3529         unused_mask = ptype_mask;
3530         for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) {
3531                 uint32_t mask = ptype_mask & valid_ptype_masks[i];
3532                 if (mask && mask != valid_ptype_masks[i]) {
3533                         ret = -EINVAL;
3534                         goto ptype_unknown;
3535                 }
3536                 unused_mask &= ~valid_ptype_masks[i];
3537         }
3538
3539         if (unused_mask) {
3540                 ret = -EINVAL;
3541                 goto ptype_unknown;
3542         }
3543
3544         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3545         if (all_ptypes == NULL) {
3546                 ret = 0;
3547                 goto ptype_unknown;
3548         }
3549
3550         /*
3551          * Accommodate as many set_ptypes as possible. If the supplied
3552          * set_ptypes array is insufficient, fill it partially.
3553          */
3554         for (i = 0, j = 0; set_ptypes != NULL &&
3555                                 (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) {
3556                 if (ptype_mask & all_ptypes[i]) {
3557                         if (j < num - 1) {
3558                                 set_ptypes[j] = all_ptypes[i];
3559                                 j++;
3560                                 continue;
3561                         }
3562                         break;
3563                 }
3564         }
3565
3566         if (set_ptypes != NULL && j < num)
3567                 set_ptypes[j] = RTE_PTYPE_UNKNOWN;
3568
3569         return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask);
3570
3571 ptype_unknown:
3572         if (num > 0)
3573                 set_ptypes[0] = RTE_PTYPE_UNKNOWN;
3574
3575         return ret;
3576 }
3577
3578 int
3579 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
3580 {
3581         struct rte_eth_dev *dev;
3582
3583         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3584         dev = &rte_eth_devices[port_id];
3585
3586         if (mac_addr == NULL) {
3587                 RTE_ETHDEV_LOG(ERR,
3588                         "Cannot get ethdev port %u MAC address to NULL\n",
3589                         port_id);
3590                 return -EINVAL;
3591         }
3592
3593         rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
3594
3595         return 0;
3596 }
3597
3598 int
3599 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
3600 {
3601         struct rte_eth_dev *dev;
3602
3603         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3604         dev = &rte_eth_devices[port_id];
3605
3606         if (mtu == NULL) {
3607                 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u MTU to NULL\n",
3608                         port_id);
3609                 return -EINVAL;
3610         }
3611
3612         *mtu = dev->data->mtu;
3613         return 0;
3614 }
3615
3616 int
3617 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
3618 {
3619         int ret;
3620         struct rte_eth_dev_info dev_info;
3621         struct rte_eth_dev *dev;
3622
3623         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3624         dev = &rte_eth_devices[port_id];
3625         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
3626
3627         /*
3628          * Check if the device supports dev_infos_get; if it does not,
3629          * skip the min_mtu/max_mtu validation here, as it requires
3630          * values populated by rte_eth_dev_info_get(), which in turn
3631          * relies on dev->dev_ops->dev_infos_get.
3632          */
3633         if (*dev->dev_ops->dev_infos_get != NULL) {
3634                 ret = rte_eth_dev_info_get(port_id, &dev_info);
3635                 if (ret != 0)
3636                         return ret;
3637
3638                 if (mtu < dev_info.min_mtu || mtu > dev_info.max_mtu)
3639                         return -EINVAL;
3640         }
3641
3642         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
3643         if (!ret)
3644                 dev->data->mtu = mtu;
3645
3646         return eth_err(port_id, ret);
3647 }
3648
3649 int
3650 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
3651 {
3652         struct rte_eth_dev *dev;
3653         int ret;
3654
3655         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3656         dev = &rte_eth_devices[port_id];
3657
3658         if (!(dev->data->dev_conf.rxmode.offloads &
3659               DEV_RX_OFFLOAD_VLAN_FILTER)) {
3660                 RTE_ETHDEV_LOG(ERR, "Port %u: vlan-filtering disabled\n",
3661                         port_id);
3662                 return -ENOSYS;
3663         }
3664
3665         if (vlan_id > 4095) {
3666                 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n",
3667                         port_id, vlan_id);
3668                 return -EINVAL;
3669         }
3670         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
3671
3672         ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
3673         if (ret == 0) {
3674                 struct rte_vlan_filter_conf *vfc;
3675                 int vidx;
3676                 int vbit;
3677
3678                 vfc = &dev->data->vlan_filter_conf;
3679                 vidx = vlan_id / 64;
3680                 vbit = vlan_id % 64;
3681
3682                 if (on)
3683                         vfc->ids[vidx] |= UINT64_C(1) << vbit;
3684                 else
3685                         vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
3686         }
3687
3688         return eth_err(port_id, ret);
3689 }
3690
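/*
 * Illustrative sketch, not part of the ethdev API: allow one VLAN ID on
 * a port. As checked above, DEV_RX_OFFLOAD_VLAN_FILTER must already be
 * enabled in the port's Rx offload configuration. The function name is
 * an assumption for the example only.
 */
static __rte_unused int
example_allow_vlan(uint16_t port_id, uint16_t vlan_id)
{
        /* third argument: 1 adds the filter entry, 0 removes it */
        return rte_eth_dev_vlan_filter(port_id, vlan_id, 1);
}
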
3691 int
3692 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3693                                     int on)
3694 {
3695         struct rte_eth_dev *dev;
3696
3697         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3698         dev = &rte_eth_devices[port_id];
3699
3700         if (rx_queue_id >= dev->data->nb_rx_queues) {
3701                 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id);
3702                 return -EINVAL;
3703         }
3704
3705         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
3706         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
3707
3708         return 0;
3709 }
3710
3711 int
3712 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
3713                                 enum rte_vlan_type vlan_type,
3714                                 uint16_t tpid)
3715 {
3716         struct rte_eth_dev *dev;
3717
3718         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3719         dev = &rte_eth_devices[port_id];
3720
3721         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
3722         return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
3723                                                                tpid));
3724 }
3725
3726 int
3727 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
3728 {
3729         struct rte_eth_dev_info dev_info;
3730         struct rte_eth_dev *dev;
3731         int ret = 0;
3732         int mask = 0;
3733         int cur, org = 0;
3734         uint64_t orig_offloads;
3735         uint64_t dev_offloads;
3736         uint64_t new_offloads;
3737
3738         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3739         dev = &rte_eth_devices[port_id];
3740
3741         /* save original values in case of failure */
3742         orig_offloads = dev->data->dev_conf.rxmode.offloads;
3743         dev_offloads = orig_offloads;
3744
3745         /* check which option changed by application */
3746         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
3747         org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
3748         if (cur != org) {
3749                 if (cur)
3750                         dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
3751                 else
3752                         dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
3753                 mask |= ETH_VLAN_STRIP_MASK;
3754         }
3755
3756         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
3757         org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
3758         if (cur != org) {
3759                 if (cur)
3760                         dev_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3761                 else
3762                         dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
3763                 mask |= ETH_VLAN_FILTER_MASK;
3764         }
3765
3766         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
3767         org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND);
3768         if (cur != org) {
3769                 if (cur)
3770                         dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
3771                 else
3772                         dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
3773                 mask |= ETH_VLAN_EXTEND_MASK;
3774         }
3775
3776         cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD);
3777         org = !!(dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP);
3778         if (cur != org) {
3779                 if (cur)
3780                         dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
3781                 else
3782                         dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
3783                 mask |= ETH_QINQ_STRIP_MASK;
3784         }
3785
3786         /* no change */
3787         if (mask == 0)
3788                 return ret;
3789
3790         ret = rte_eth_dev_info_get(port_id, &dev_info);
3791         if (ret != 0)
3792                 return ret;
3793
3794         /* Rx VLAN offloading must be within its device capabilities */
3795         if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) {
3796                 new_offloads = dev_offloads & ~orig_offloads;
3797                 RTE_ETHDEV_LOG(ERR,
3798                         "Ethdev port_id=%u newly added VLAN offloads "
3799                         "0x%" PRIx64 " must be within Rx offloads capabilities "
3800                         "0x%" PRIx64 " in %s()\n",
3801                         port_id, new_offloads, dev_info.rx_offload_capa,
3802                         __func__);
3803                 return -EINVAL;
3804         }
3805
3806         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
3807         dev->data->dev_conf.rxmode.offloads = dev_offloads;
3808         ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
3809         if (ret) {
3810                 /* hit an error, restore original values */
3811                 dev->data->dev_conf.rxmode.offloads = orig_offloads;
3812         }
3813
3814         return eth_err(port_id, ret);
3815 }
3816
3817 int
3818 rte_eth_dev_get_vlan_offload(uint16_t port_id)
3819 {
3820         struct rte_eth_dev *dev;
3821         uint64_t *dev_offloads;
3822         int ret = 0;
3823
3824         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3825         dev = &rte_eth_devices[port_id];
3826         dev_offloads = &dev->data->dev_conf.rxmode.offloads;
3827
3828         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
3829                 ret |= ETH_VLAN_STRIP_OFFLOAD;
3830
3831         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
3832                 ret |= ETH_VLAN_FILTER_OFFLOAD;
3833
3834         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
3835                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
3836
3837         if (*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
3838                 ret |= ETH_QINQ_STRIP_OFFLOAD;
3839
3840         return ret;
3841 }
3842
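/*
 * Illustrative sketch, not part of the ethdev API: enable VLAN
 * stripping while preserving the other VLAN offload bits, using the
 * get/set pair above. The function name is an assumption for the
 * example only.
 */
static __rte_unused int
example_enable_vlan_strip(uint16_t port_id)
{
        int mask = rte_eth_dev_get_vlan_offload(port_id);

        if (mask < 0)
                return mask;

        return rte_eth_dev_set_vlan_offload(port_id,
                        mask | ETH_VLAN_STRIP_OFFLOAD);
}
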
3843 int
3844 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
3845 {
3846         struct rte_eth_dev *dev;
3847
3848         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3849         dev = &rte_eth_devices[port_id];
3850
3851         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
3852         return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
3853 }
3854
3855 int
3856 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3857 {
3858         struct rte_eth_dev *dev;
3859
3860         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3861         dev = &rte_eth_devices[port_id];
3862
3863         if (fc_conf == NULL) {
3864                 RTE_ETHDEV_LOG(ERR,
3865                         "Cannot get ethdev port %u flow control config to NULL\n",
3866                         port_id);
3867                 return -EINVAL;
3868         }
3869
3870         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
3871         memset(fc_conf, 0, sizeof(*fc_conf));
3872         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
3873 }
3874
3875 int
3876 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3877 {
3878         struct rte_eth_dev *dev;
3879
3880         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3881         dev = &rte_eth_devices[port_id];
3882
3883         if (fc_conf == NULL) {
3884                 RTE_ETHDEV_LOG(ERR,
3885                         "Cannot set ethdev port %u flow control from NULL config\n",
3886                         port_id);
3887                 return -EINVAL;
3888         }
3889
3890         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
3891                 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n");
3892                 return -EINVAL;
3893         }
3894
3895         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
3896         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
3897 }
3898
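/*
 * Illustrative sketch, not part of the ethdev API: read the current
 * flow control configuration, request full Rx/Tx pause, and write it
 * back. The function name is an assumption for the example only.
 */
static __rte_unused int
example_enable_flow_ctrl(uint16_t port_id)
{
        struct rte_eth_fc_conf fc;
        int ret;

        /* rte_eth_dev_flow_ctrl_get() zeroes fc before the driver fills it */
        ret = rte_eth_dev_flow_ctrl_get(port_id, &fc);
        if (ret != 0)
                return ret;

        fc.mode = RTE_FC_FULL;
        return rte_eth_dev_flow_ctrl_set(port_id, &fc);
}
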
3899 int
3900 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
3901                                    struct rte_eth_pfc_conf *pfc_conf)
3902 {
3903         struct rte_eth_dev *dev;
3904
3905         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3906         dev = &rte_eth_devices[port_id];
3907
3908         if (pfc_conf == NULL) {
3909                 RTE_ETHDEV_LOG(ERR,
3910                         "Cannot set ethdev port %u priority flow control from NULL config\n",
3911                         port_id);
3912                 return -EINVAL;
3913         }
3914
3915         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
3916                 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
3917                 return -EINVAL;
3918         }
3919
3920         /* High water/low water validation is device-specific */
3921         if (*dev->dev_ops->priority_flow_ctrl_set)
3922                 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
3923                                         (dev, pfc_conf));
3924         return -ENOTSUP;
3925 }
3926
3927 static int
3928 eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
3929                         uint16_t reta_size)
3930 {
3931         uint16_t i, num;
3932
3933         num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
3934         for (i = 0; i < num; i++) {
3935                 if (reta_conf[i].mask)
3936                         return 0;
3937         }
3938
3939         return -EINVAL;
3940 }
3941
3942 static int
3943 eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
3944                          uint16_t reta_size,
3945                          uint16_t max_rxq)
3946 {
3947         uint16_t i, idx, shift;
3948
3949         if (max_rxq == 0) {
3950                 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n");
3951                 return -EINVAL;
3952         }
3953
3954         for (i = 0; i < reta_size; i++) {
3955                 idx = i / RTE_RETA_GROUP_SIZE;
3956                 shift = i % RTE_RETA_GROUP_SIZE;
3957                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
3958                         (reta_conf[idx].reta[shift] >= max_rxq)) {
3959                         RTE_ETHDEV_LOG(ERR,
3960                                 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n",
3961                                 idx, shift,
3962                                 reta_conf[idx].reta[shift], max_rxq);
3963                         return -EINVAL;
3964                 }
3965         }
3966
3967         return 0;
3968 }
3969
3970 int
3971 rte_eth_dev_rss_reta_update(uint16_t port_id,
3972                             struct rte_eth_rss_reta_entry64 *reta_conf,
3973                             uint16_t reta_size)
3974 {
3975         struct rte_eth_dev *dev;
3976         int ret;
3977
3978         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3979         dev = &rte_eth_devices[port_id];
3980
3981         if (reta_conf == NULL) {
3982                 RTE_ETHDEV_LOG(ERR,
3983                         "Cannot update ethdev port %u RSS RETA to NULL\n",
3984                         port_id);
3985                 return -EINVAL;
3986         }
3987
3988         if (reta_size == 0) {
3989                 RTE_ETHDEV_LOG(ERR,
3990                         "Cannot update ethdev port %u RSS RETA with zero size\n",
3991                         port_id);
3992                 return -EINVAL;
3993         }
3994
3995         /* Check mask bits */
3996         ret = eth_check_reta_mask(reta_conf, reta_size);
3997         if (ret < 0)
3998                 return ret;
3999
4000         /* Check entry value */
4001         ret = eth_check_reta_entry(reta_conf, reta_size,
4002                                 dev->data->nb_rx_queues);
4003         if (ret < 0)
4004                 return ret;
4005
4006         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
4007         return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
4008                                                              reta_size));
4009 }
4010
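/*
 * Illustrative sketch, not part of the ethdev API: spread a RETA of
 * reta_size entries round-robin across nb_queues Rx queues. Every
 * group's mask selects all of its entries, satisfying the mask check
 * above; reta_size is assumed to be a multiple of RTE_RETA_GROUP_SIZE.
 * The function name is an assumption for the example only.
 */
static __rte_unused int
example_fill_reta(uint16_t port_id, uint16_t reta_size, uint16_t nb_queues)
{
        uint16_t i;

        if (nb_queues == 0 || reta_size == 0 ||
                        reta_size % RTE_RETA_GROUP_SIZE != 0)
                return -EINVAL;

        struct rte_eth_rss_reta_entry64 reta[reta_size / RTE_RETA_GROUP_SIZE];

        memset(reta, 0, sizeof(reta));
        for (i = 0; i < reta_size; i++) {
                reta[i / RTE_RETA_GROUP_SIZE].mask |=
                        UINT64_C(1) << (i % RTE_RETA_GROUP_SIZE);
                reta[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
                        i % nb_queues;
        }

        return rte_eth_dev_rss_reta_update(port_id, reta, reta_size);
}
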
4011 int
4012 rte_eth_dev_rss_reta_query(uint16_t port_id,
4013                            struct rte_eth_rss_reta_entry64 *reta_conf,
4014                            uint16_t reta_size)
4015 {
4016         struct rte_eth_dev *dev;
4017         int ret;
4018
4019         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4020         dev = &rte_eth_devices[port_id];
4021
4022         if (reta_conf == NULL) {
4023                 RTE_ETHDEV_LOG(ERR,
4024                         "Cannot query ethdev port %u RSS RETA from NULL config\n",
4025                         port_id);
4026                 return -EINVAL;
4027         }
4028
4029         /* Check mask bits */
4030         ret = eth_check_reta_mask(reta_conf, reta_size);
4031         if (ret < 0)
4032                 return ret;
4033
4034         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
4035         return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
4036                                                             reta_size));
4037 }
4038
4039 int
4040 rte_eth_dev_rss_hash_update(uint16_t port_id,
4041                             struct rte_eth_rss_conf *rss_conf)
4042 {
4043         struct rte_eth_dev *dev;
4044         struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
4045         int ret;
4046
4047         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4048         dev = &rte_eth_devices[port_id];
4049
4050         if (rss_conf == NULL) {
4051                 RTE_ETHDEV_LOG(ERR,
4052                         "Cannot update ethdev port %u RSS hash from NULL config\n",
4053                         port_id);
4054                 return -EINVAL;
4055         }
4056
4057         ret = rte_eth_dev_info_get(port_id, &dev_info);
4058         if (ret != 0)
4059                 return ret;
4060
4061         rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf);
4062         if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
4063             dev_info.flow_type_rss_offloads) {
4064                 RTE_ETHDEV_LOG(ERR,
4065                         "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
4066                         port_id, rss_conf->rss_hf,
4067                         dev_info.flow_type_rss_offloads);
4068                 return -EINVAL;
4069         }
4070         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
4071         return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
4072                                                                  rss_conf));
4073 }
4074
4075 int
4076 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
4077                               struct rte_eth_rss_conf *rss_conf)
4078 {
4079         struct rte_eth_dev *dev;
4080
4081         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4082         dev = &rte_eth_devices[port_id];
4083
4084         if (rss_conf == NULL) {
4085                 RTE_ETHDEV_LOG(ERR,
4086                         "Cannot get ethdev port %u RSS hash config to NULL\n",
4087                         port_id);
4088                 return -EINVAL;
4089         }
4090
4091         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
4092         return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
4093                                                                    rss_conf));
4094 }
4095
4096 int
4097 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
4098                                 struct rte_eth_udp_tunnel *udp_tunnel)
4099 {
4100         struct rte_eth_dev *dev;
4101
4102         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4103         dev = &rte_eth_devices[port_id];
4104
4105         if (udp_tunnel == NULL) {
4106                 RTE_ETHDEV_LOG(ERR,
4107                         "Cannot add ethdev port %u UDP tunnel port from NULL UDP tunnel\n",
4108                         port_id);
4109                 return -EINVAL;
4110         }
4111
4112         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
4113                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4114                 return -EINVAL;
4115         }
4116
4117         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
4118         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
4119                                                                 udp_tunnel));
4120 }
4121
4122 int
4123 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
4124                                    struct rte_eth_udp_tunnel *udp_tunnel)
4125 {
4126         struct rte_eth_dev *dev;
4127
4128         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4129         dev = &rte_eth_devices[port_id];
4130
4131         if (udp_tunnel == NULL) {
4132                 RTE_ETHDEV_LOG(ERR,
4133                         "Cannot delete ethdev port %u UDP tunnel port from NULL UDP tunnel\n",
4134                         port_id);
4135                 return -EINVAL;
4136         }
4137
4138         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
4139                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4140                 return -EINVAL;
4141         }
4142
4143         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
4144         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
4145                                                                 udp_tunnel));
4146 }
4147
4148 int
4149 rte_eth_led_on(uint16_t port_id)
4150 {
4151         struct rte_eth_dev *dev;
4152
4153         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4154         dev = &rte_eth_devices[port_id];
4155
4156         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
4157         return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
4158 }
4159
4160 int
4161 rte_eth_led_off(uint16_t port_id)
4162 {
4163         struct rte_eth_dev *dev;
4164
4165         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4166         dev = &rte_eth_devices[port_id];
4167
4168         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
4169         return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
4170 }
4171
4172 int
4173 rte_eth_fec_get_capability(uint16_t port_id,
4174                            struct rte_eth_fec_capa *speed_fec_capa,
4175                            unsigned int num)
4176 {
4177         struct rte_eth_dev *dev;
4178         int ret;
4179
4180         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4181         dev = &rte_eth_devices[port_id];
4182
4183         if (speed_fec_capa == NULL && num > 0) {
4184                 RTE_ETHDEV_LOG(ERR,
4185                         "Cannot get ethdev port %u FEC capability to NULL when array size is non-zero\n",
4186                         port_id);
4187                 return -EINVAL;
4188         }
4189
4190         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get_capability, -ENOTSUP);
4191         ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num);
4192
4193         return ret;
4194 }
4195
4196 int
4197 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa)
4198 {
4199         struct rte_eth_dev *dev;
4200
4201         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4202         dev = &rte_eth_devices[port_id];
4203
4204         if (fec_capa == NULL) {
4205                 RTE_ETHDEV_LOG(ERR,
4206                         "Cannot get ethdev port %u current FEC mode to NULL\n",
4207                         port_id);
4208                 return -EINVAL;
4209         }
4210
4211         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get, -ENOTSUP);
4212         return eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa));
4213 }
4214
4215 int
4216 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa)
4217 {
4218         struct rte_eth_dev *dev;
4219
4220         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4221         dev = &rte_eth_devices[port_id];
4222
4223         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_set, -ENOTSUP);
4224         return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa));
4225 }
4226
4227 /*
4228  * Returns the index of addr in the MAC address array, or -1 if not found.
4229  * Use 00:00:00:00:00:00 to find an empty spot.
4230  */
4231 static int
4232 eth_dev_get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
4233 {
4234         struct rte_eth_dev_info dev_info;
4235         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4236         unsigned i;
4237         int ret;
4238
4239         ret = rte_eth_dev_info_get(port_id, &dev_info);
4240         if (ret != 0)
4241                 return -1;
4242
4243         for (i = 0; i < dev_info.max_mac_addrs; i++)
4244                 if (memcmp(addr, &dev->data->mac_addrs[i],
4245                                 RTE_ETHER_ADDR_LEN) == 0)
4246                         return i;
4247
4248         return -1;
4249 }
4250
4251 static const struct rte_ether_addr null_mac_addr;
4252
4253 int
4254 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
4255                         uint32_t pool)
4256 {
4257         struct rte_eth_dev *dev;
4258         int index;
4259         uint64_t pool_mask;
4260         int ret;
4261
4262         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4263         dev = &rte_eth_devices[port_id];
4264
4265         if (addr == NULL) {
4266                 RTE_ETHDEV_LOG(ERR,
4267                         "Cannot add ethdev port %u MAC address from NULL address\n",
4268                         port_id);
4269                 return -EINVAL;
4270         }
4271
4272         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
4273
4274         if (rte_is_zero_ether_addr(addr)) {
4275                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
4276                         port_id);
4277                 return -EINVAL;
4278         }
4279         if (pool >= ETH_64_POOLS) {
4280                 RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", ETH_64_POOLS - 1);
4281                 return -EINVAL;
4282         }
4283
4284         index = eth_dev_get_mac_addr_index(port_id, addr);
4285         if (index < 0) {
4286                 index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr);
4287                 if (index < 0) {
4288                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
4289                                 port_id);
4290                         return -ENOSPC;
4291                 }
4292         } else {
4293                 pool_mask = dev->data->mac_pool_sel[index];
4294
4295                 /* If both the MAC address and pool are already set, do nothing */
4296                 if (pool_mask & (1ULL << pool))
4297                         return 0;
4298         }
4299
4300         /* Update NIC */
4301         ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
4302
4303         if (ret == 0) {
4304                 /* Update address in NIC data structure */
4305                 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]);
4306
4307                 /* Update pool bitmap in NIC data structure */
4308                 dev->data->mac_pool_sel[index] |= (1ULL << pool);
4309         }
4310
4311         return eth_err(port_id, ret);
4312 }
4313
4314 int
4315 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr)
4316 {
4317         struct rte_eth_dev *dev;
4318         int index;
4319
4320         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4321         dev = &rte_eth_devices[port_id];
4322
4323         if (addr == NULL) {
4324                 RTE_ETHDEV_LOG(ERR,
4325                         "Cannot remove ethdev port %u MAC address from NULL address\n",
4326                         port_id);
4327                 return -EINVAL;
4328         }
4329
4330         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
4331
4332         index = eth_dev_get_mac_addr_index(port_id, addr);
4333         if (index == 0) {
4334                 RTE_ETHDEV_LOG(ERR,
4335                         "Port %u: Cannot remove default MAC address\n",
4336                         port_id);
4337                 return -EADDRINUSE;
4338         } else if (index < 0)
4339                 return 0;  /* Do nothing if address wasn't found */
4340
4341         /* Update NIC */
4342         (*dev->dev_ops->mac_addr_remove)(dev, index);
4343
4344         /* Update address in NIC data structure */
4345         rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
4346
4347         /* reset pool bitmap */
4348         dev->data->mac_pool_sel[index] = 0;
4349
4350         return 0;
4351 }
4352
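/*
 * Illustrative sketch, not part of the ethdev API: install a secondary
 * MAC address in pool 0 and then roll it back. As enforced above, the
 * default address at index 0 cannot be removed this way. The function
 * name and the address bytes are assumptions for the example only.
 */
static __rte_unused int
example_toggle_secondary_mac(uint16_t port_id)
{
        struct rte_ether_addr mac = {
                .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
        };
        int ret;

        ret = rte_eth_dev_mac_addr_add(port_id, &mac, 0);
        if (ret != 0)
                return ret;

        return rte_eth_dev_mac_addr_remove(port_id, &mac);
}
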
4353 int
4354 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr)
4355 {
4356         struct rte_eth_dev *dev;
4357         int ret;
4358
4359         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4360         dev = &rte_eth_devices[port_id];
4361
4362         if (addr == NULL) {
4363                 RTE_ETHDEV_LOG(ERR,
4364                         "Cannot set ethdev port %u default MAC address from NULL address\n",
4365                         port_id);
4366                 return -EINVAL;
4367         }
4368
4369         if (!rte_is_valid_assigned_ether_addr(addr))
4370                 return -EINVAL;
4371
4372         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
4373
4374         ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
4375         if (ret < 0)
4376                 return ret;
4377
4378         /* Update default address in NIC data structure */
4379         rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
4380
4381         return 0;
4382 }
4383
4385 /*
4386  * Returns the index of addr in the hash MAC address array, or -1 if not
4387  * found. Use 00:00:00:00:00:00 to find an empty spot.
4388  */
4389 static int
4390 eth_dev_get_hash_mac_addr_index(uint16_t port_id,
4391                 const struct rte_ether_addr *addr)
4392 {
4393         struct rte_eth_dev_info dev_info;
4394         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4395         unsigned i;
4396         int ret;
4397
4398         ret = rte_eth_dev_info_get(port_id, &dev_info);
4399         if (ret != 0)
4400                 return -1;
4401
4402         if (!dev->data->hash_mac_addrs)
4403                 return -1;
4404
4405         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
4406                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
4407                         RTE_ETHER_ADDR_LEN) == 0)
4408                         return i;
4409
4410         return -1;
4411 }
4412
4413 int
4414 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
4415                                 uint8_t on)
4416 {
4417         int index;
4418         int ret;
4419         struct rte_eth_dev *dev;
4420
4421         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4422         dev = &rte_eth_devices[port_id];
4423
4424         if (addr == NULL) {
4425                 RTE_ETHDEV_LOG(ERR,
4426                         "Cannot set ethdev port %u unicast hash table from NULL address\n",
4427                         port_id);
4428                 return -EINVAL;
4429         }
4430
4431         if (rte_is_zero_ether_addr(addr)) {
4432                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
4433                         port_id);
4434                 return -EINVAL;
4435         }
4436
4437         index = eth_dev_get_hash_mac_addr_index(port_id, addr);
4438         /* Check if it's already there, and do nothing */
4439         if ((index >= 0) && on)
4440                 return 0;
4441
4442         if (index < 0) {
4443                 if (!on) {
4444                         RTE_ETHDEV_LOG(ERR,
4445                                 "Port %u: the MAC address was not set in the unicast hash table (UTA)\n",
4446                                 port_id);
4447                         return -EINVAL;
4448                 }
4449
4450                 index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr);
4451                 if (index < 0) {
4452                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
4453                                 port_id);
4454                         return -ENOSPC;
4455                 }
4456         }
4457
4458         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
4459         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
4460         if (ret == 0) {
4461                 /* Update address in NIC data structure */
4462                 if (on)
4463                         rte_ether_addr_copy(addr,
4464                                         &dev->data->hash_mac_addrs[index]);
4465                 else
4466                         rte_ether_addr_copy(&null_mac_addr,
4467                                         &dev->data->hash_mac_addrs[index]);
4468         }
4469
4470         return eth_err(port_id, ret);
4471 }
4472
4473 int
4474 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
4475 {
4476         struct rte_eth_dev *dev;
4477
4478         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4479         dev = &rte_eth_devices[port_id];
4480
4481         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
4482         return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
4483                                                                        on));
4484 }
4485
4486 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
4487                                         uint16_t tx_rate)
4488 {
4489         struct rte_eth_dev *dev;
4490         struct rte_eth_dev_info dev_info;
4491         struct rte_eth_link link;
4492         int ret;
4493
4494         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4495         dev = &rte_eth_devices[port_id];
4496
4497         ret = rte_eth_dev_info_get(port_id, &dev_info);
4498         if (ret != 0)
4499                 return ret;
4500
4501         link = dev->data->dev_link;
4502
4503         if (queue_idx >= dev_info.max_tx_queues) {
4504                 RTE_ETHDEV_LOG(ERR,
4505                         "Set queue rate limit: port %u: invalid queue id=%u\n",
4506                         port_id, queue_idx);
4507                 return -EINVAL;
4508         }
4509
4510         if (tx_rate > link.link_speed) {
4511                 RTE_ETHDEV_LOG(ERR,
4512                         "Set queue rate limit: invalid tx_rate=%u, bigger than link speed=%u\n",
4513                         tx_rate, link.link_speed);
4514                 return -EINVAL;
4515         }
4516
4517         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
4518         return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
4519                                                         queue_idx, tx_rate));
4520 }
4521
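/*
 * Illustrative usage sketch, not part of this file: capping the rate of a
 * Tx queue. tx_rate is in Mbps and, as checked above, must not exceed the
 * current link speed. The port id, queue id and 1000 Mbps cap are
 * assumptions for the example only.
 *
 *	// limit Tx queue 0 of port 0 to roughly 1 Gbps
 *	int ret = rte_eth_set_queue_rate_limit(0, 0, 1000);
 *	if (ret == -ENOTSUP)
 *		printf("PMD has no per-queue rate limiting\n");
 *	else if (ret != 0)
 *		printf("rate limit rejected: %d\n", ret);
 */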
4522 int
4523 rte_eth_mirror_rule_set(uint16_t port_id,
4524                         struct rte_eth_mirror_conf *mirror_conf,
4525                         uint8_t rule_id, uint8_t on)
4526 {
4527         struct rte_eth_dev *dev;
4528
4529         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4530         dev = &rte_eth_devices[port_id];
4531
4532         if (mirror_conf == NULL) {
4533                 RTE_ETHDEV_LOG(ERR,
4534                         "Cannot set ethdev port %u mirror rule from NULL config\n",
4535                         port_id);
4536                 return -EINVAL;
4537         }
4538
4539         if (mirror_conf->rule_type == 0) {
4540                 RTE_ETHDEV_LOG(ERR, "Mirror rule type cannot be 0\n");
4541                 return -EINVAL;
4542         }
4543
4544         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
4545                 RTE_ETHDEV_LOG(ERR, "Invalid dst pool, pool id must be 0-%d\n",
4546                         ETH_64_POOLS - 1);
4547                 return -EINVAL;
4548         }
4549
4550         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
4551              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
4552             (mirror_conf->pool_mask == 0)) {
4553                 RTE_ETHDEV_LOG(ERR,
4554                         "Invalid mirror pool, pool mask cannot be 0\n");
4555                 return -EINVAL;
4556         }
4557
4558         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
4559             mirror_conf->vlan.vlan_mask == 0) {
4560                 RTE_ETHDEV_LOG(ERR,
4561                         "Invalid vlan mask, vlan mask cannot be 0\n");
4562                 return -EINVAL;
4563         }
4564
4565         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
4566
4567         return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
4568                                                 mirror_conf, rule_id, on));
4569 }
4570
4571 int
4572 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
4573 {
4574         struct rte_eth_dev *dev;
4575
4576         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4577         dev = &rte_eth_devices[port_id];
4578
4579         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
4580         return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev, rule_id));
4581 }
4582
4583 RTE_INIT(eth_dev_init_cb_lists)
4584 {
4585         uint16_t i;
4586
4587         for (i = 0; i < RTE_MAX_ETHPORTS; i++)
4588                 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
4589 }
4590
4591 int
4592 rte_eth_dev_callback_register(uint16_t port_id,
4593                         enum rte_eth_event_type event,
4594                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4595 {
4596         struct rte_eth_dev *dev;
4597         struct rte_eth_dev_callback *user_cb;
4598         uint16_t next_port;
4599         uint16_t last_port;
4600
4601         if (cb_fn == NULL) {
4602                 RTE_ETHDEV_LOG(ERR,
4603                         "Cannot register ethdev port %u callback from NULL\n",
4604                         port_id);
4605                 return -EINVAL;
4606         }
4607
4608         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4609                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id);
4610                 return -EINVAL;
4611         }
4612
4613         if (port_id == RTE_ETH_ALL) {
4614                 next_port = 0;
4615                 last_port = RTE_MAX_ETHPORTS - 1;
4616         } else {
4617                 next_port = last_port = port_id;
4618         }
4619
4620         rte_spinlock_lock(&eth_dev_cb_lock);
4621
4622         do {
4623                 dev = &rte_eth_devices[next_port];
4624
4625                 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
4626                         if (user_cb->cb_fn == cb_fn &&
4627                                 user_cb->cb_arg == cb_arg &&
4628                                 user_cb->event == event) {
4629                                 break;
4630                         }
4631                 }
4632
4633                 /* create a new callback. */
4634                 if (user_cb == NULL) {
4635                         user_cb = rte_zmalloc("INTR_USER_CALLBACK",
4636                                 sizeof(struct rte_eth_dev_callback), 0);
4637                         if (user_cb != NULL) {
4638                                 user_cb->cb_fn = cb_fn;
4639                                 user_cb->cb_arg = cb_arg;
4640                                 user_cb->event = event;
4641                                 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
4642                                                   user_cb, next);
4643                         } else {
4644                                 rte_spinlock_unlock(&eth_dev_cb_lock);
4645                                 rte_eth_dev_callback_unregister(port_id, event,
4646                                                                 cb_fn, cb_arg);
4647                                 return -ENOMEM;
4648                         }
4649
4650                 }
4651         } while (++next_port <= last_port);
4652
4653         rte_spinlock_unlock(&eth_dev_cb_lock);
4654         return 0;
4655 }
4656
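/*
 * Illustrative usage sketch, not part of this file: registering a link
 * status handler on every port slot at once via RTE_ETH_ALL (the loop
 * above walks all RTE_MAX_ETHPORTS entries, so ports created later are
 * covered too). The handler body is an assumption for the example only.
 *
 *	static int
 *	lsc_handler(uint16_t port_id, enum rte_eth_event_type event,
 *		    void *cb_arg, void *ret_param)
 *	{
 *		RTE_SET_USED(cb_arg);
 *		RTE_SET_USED(ret_param);
 *		if (event == RTE_ETH_EVENT_INTR_LSC)
 *			printf("port %u: link state changed\n", port_id);
 *		return 0;
 *	}
 *
 *	rte_eth_dev_callback_register(RTE_ETH_ALL, RTE_ETH_EVENT_INTR_LSC,
 *				      lsc_handler, NULL);
 */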
4657 int
4658 rte_eth_dev_callback_unregister(uint16_t port_id,
4659                         enum rte_eth_event_type event,
4660                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4661 {
4662         int ret;
4663         struct rte_eth_dev *dev;
4664         struct rte_eth_dev_callback *cb, *next;
4665         uint16_t next_port;
4666         uint16_t last_port;
4667
4668         if (cb_fn == NULL) {
4669                 RTE_ETHDEV_LOG(ERR,
4670                         "Cannot unregister ethdev port %u callback from NULL\n",
4671                         port_id);
4672                 return -EINVAL;
4673         }
4674
4675         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4676                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id);
4677                 return -EINVAL;
4678         }
4679
4680         if (port_id == RTE_ETH_ALL) {
4681                 next_port = 0;
4682                 last_port = RTE_MAX_ETHPORTS - 1;
4683         } else {
4684                 next_port = last_port = port_id;
4685         }
4686
4687         rte_spinlock_lock(&eth_dev_cb_lock);
4688
4689         do {
4690                 dev = &rte_eth_devices[next_port];
4691                 ret = 0;
4692                 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
4693                      cb = next) {
4694
4695                         next = TAILQ_NEXT(cb, next);
4696
4697                         if (cb->cb_fn != cb_fn || cb->event != event ||
4698                             (cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
4699                                 continue;
4700
4701                         /*
4702                          * if this callback is not executing right now,
4703                          * then remove it.
4704                          */
4705                         if (cb->active == 0) {
4706                                 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
4707                                 rte_free(cb);
4708                         } else {
4709                                 ret = -EAGAIN;
4710                         }
4711                 }
4712         } while (++next_port <= last_port);
4713
4714         rte_spinlock_unlock(&eth_dev_cb_lock);
4715         return ret;
4716 }
4717
4718 int
4719 rte_eth_dev_callback_process(struct rte_eth_dev *dev,
4720         enum rte_eth_event_type event, void *ret_param)
4721 {
4722         struct rte_eth_dev_callback *cb_lst;
4723         struct rte_eth_dev_callback dev_cb;
4724         int rc = 0;
4725
4726         rte_spinlock_lock(&eth_dev_cb_lock);
4727         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
4728                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
4729                         continue;
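                /* Work on a local copy so the callback can run with
                 * eth_dev_cb_lock released; the "active" flag makes
                 * rte_eth_dev_callback_unregister() return -EAGAIN
                 * instead of freeing an executing callback.
                 */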
4730                 dev_cb = *cb_lst;
4731                 cb_lst->active = 1;
4732                 if (ret_param != NULL)
4733                         dev_cb.ret_param = ret_param;
4734
4735                 rte_spinlock_unlock(&eth_dev_cb_lock);
4736                 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
4737                                 dev_cb.cb_arg, dev_cb.ret_param);
4738                 rte_spinlock_lock(&eth_dev_cb_lock);
4739                 cb_lst->active = 0;
4740         }
4741         rte_spinlock_unlock(&eth_dev_cb_lock);
4742         return rc;
4743 }
4744
4745 void
4746 rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
4747 {
4748         if (dev == NULL)
4749                 return;
4750
4751         rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);
4752
4753         dev->state = RTE_ETH_DEV_ATTACHED;
4754 }
4755
4756 int
4757 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
4758 {
4759         uint32_t vec;
4760         struct rte_eth_dev *dev;
4761         struct rte_intr_handle *intr_handle;
4762         uint16_t qid;
4763         int rc;
4764
4765         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4766         dev = &rte_eth_devices[port_id];
4767
4768         if (!dev->intr_handle) {
4769                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4770                 return -ENOTSUP;
4771         }
4772
4773         intr_handle = dev->intr_handle;
4774         if (!intr_handle->intr_vec) {
4775                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4776                 return -EPERM;
4777         }
4778
4779         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
4780                 vec = intr_handle->intr_vec[qid];
4781                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4782                 if (rc && rc != -EEXIST) {
4783                         RTE_ETHDEV_LOG(ERR,
4784                                 "p %u q %u rx ctl error op %d epfd %d vec %u\n",
4785                                 port_id, qid, op, epfd, vec);
4786                 }
4787         }
4788
4789         return 0;
4790 }
4791
4792 int
4793 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
4794 {
4795         struct rte_intr_handle *intr_handle;
4796         struct rte_eth_dev *dev;
4797         unsigned int efd_idx;
4798         uint32_t vec;
4799         int fd;
4800
4801         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
4802         dev = &rte_eth_devices[port_id];
4803
4804         if (queue_id >= dev->data->nb_rx_queues) {
4805                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4806                 return -1;
4807         }
4808
4809         if (!dev->intr_handle) {
4810                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4811                 return -1;
4812         }
4813
4814         intr_handle = dev->intr_handle;
4815         if (!intr_handle->intr_vec) {
4816                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4817                 return -1;
4818         }
4819
4820         vec = intr_handle->intr_vec[queue_id];
4821         efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
4822                 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
4823         fd = intr_handle->efds[efd_idx];
4824
4825         return fd;
4826 }
4827
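/*
 * Illustrative usage sketch, not part of this file: adding the per-queue
 * event fd to an application-owned epoll set instead of going through
 * rte_eth_dev_rx_intr_ctl_q(). The epfd, port and queue variables are
 * assumptions for the example only.
 *
 *	#include <sys/epoll.h>
 *
 *	int fd = rte_eth_dev_rx_intr_ctl_q_get_fd(port, queue);
 *	if (fd >= 0) {
 *		struct epoll_event ev = {
 *			.events = EPOLLIN,
 *			.data.u32 = queue,
 *		};
 *		epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev);
 *		rte_eth_dev_rx_intr_enable(port, queue);
 *	}
 */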
4828 static inline int
4829 eth_dev_dma_mzone_name(char *name, size_t len, uint16_t port_id, uint16_t queue_id,
4830                 const char *ring_name)
4831 {
4832         return snprintf(name, len, "eth_p%d_q%d_%s",
4833                         port_id, queue_id, ring_name);
4834 }
4835
4836 const struct rte_memzone *
4837 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
4838                          uint16_t queue_id, size_t size, unsigned align,
4839                          int socket_id)
4840 {
4841         char z_name[RTE_MEMZONE_NAMESIZE];
4842         const struct rte_memzone *mz;
4843         int rc;
4844
4845         rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
4846                         queue_id, ring_name);
4847         if (rc >= RTE_MEMZONE_NAMESIZE) {
4848                 RTE_ETHDEV_LOG(ERR, "ring name too long\n");
4849                 rte_errno = ENAMETOOLONG;
4850                 return NULL;
4851         }
4852
4853         mz = rte_memzone_lookup(z_name);
4854         if (mz) {
4855                 if ((socket_id != SOCKET_ID_ANY && socket_id != mz->socket_id) ||
4856                                 size > mz->len ||
4857                                 ((uintptr_t)mz->addr & (align - 1)) != 0) {
4858                         RTE_ETHDEV_LOG(ERR,
4859                                 "memzone %s does not satisfy the requested attributes\n",
4860                                 mz->name);
4861                         return NULL;
4862                 }
4863
4864                 return mz;
4865         }
4866
4867         return rte_memzone_reserve_aligned(z_name, size, socket_id,
4868                         RTE_MEMZONE_IOVA_CONTIG, align);
4869 }
4870
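/*
 * Illustrative usage sketch, not part of this file: a PMD reserving the
 * descriptor ring of an Rx queue during queue setup. The lookup above
 * makes the call safe to repeat across device restarts, as long as the
 * requested size, alignment and socket still fit the existing memzone.
 * The dev, rx_queue_id, ring_size and socket_id names are assumptions
 * for the example only.
 *
 *	const struct rte_memzone *mz;
 *
 *	mz = rte_eth_dma_zone_reserve(dev, "rx_ring", rx_queue_id,
 *			ring_size, RTE_CACHE_LINE_SIZE, socket_id);
 *	if (mz == NULL)
 *		return -ENOMEM;	// rte_errno is set on name/allocation errors
 */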
4871 int
4872 rte_eth_dma_zone_free(const struct rte_eth_dev *dev, const char *ring_name,
4873                 uint16_t queue_id)
4874 {
4875         char z_name[RTE_MEMZONE_NAMESIZE];
4876         const struct rte_memzone *mz;
4877         int rc = 0;
4878
4879         rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
4880                         queue_id, ring_name);
4881         if (rc >= RTE_MEMZONE_NAMESIZE) {
4882                 RTE_ETHDEV_LOG(ERR, "ring name too long\n");
4883                 return -ENAMETOOLONG;
4884         }
4885
4886         mz = rte_memzone_lookup(z_name);
4887         if (mz)
4888                 rc = rte_memzone_free(mz);
4889         else
4890                 rc = -ENOENT;
4891
4892         return rc;
4893 }
4894
4895 int
4896 rte_eth_dev_create(struct rte_device *device, const char *name,
4897         size_t priv_data_size,
4898         ethdev_bus_specific_init ethdev_bus_specific_init,
4899         void *bus_init_params,
4900         ethdev_init_t ethdev_init, void *init_params)
4901 {
4902         struct rte_eth_dev *ethdev;
4903         int retval;
4904
4905         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL);
4906
4907         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
4908                 ethdev = rte_eth_dev_allocate(name);
4909                 if (!ethdev)
4910                         return -ENODEV;
4911
4912                 if (priv_data_size) {
4913                         ethdev->data->dev_private = rte_zmalloc_socket(
4914                                 name, priv_data_size, RTE_CACHE_LINE_SIZE,
4915                                 device->numa_node);
4916
4917                         if (!ethdev->data->dev_private) {
4918                                 RTE_ETHDEV_LOG(ERR,
4919                                         "failed to allocate private data\n");
4920                                 retval = -ENOMEM;
4921                                 goto probe_failed;
4922                         }
4923                 }
4924         } else {
4925                 ethdev = rte_eth_dev_attach_secondary(name);
4926                 if (!ethdev) {
4927                         RTE_ETHDEV_LOG(ERR,
4928                                 "secondary process attach failed, ethdev doesn't exist\n");
4929                         return -ENODEV;
4930                 }
4931         }
4932
4933         ethdev->device = device;
4934
4935         if (ethdev_bus_specific_init) {
4936                 retval = ethdev_bus_specific_init(ethdev, bus_init_params);
4937                 if (retval) {
4938                         RTE_ETHDEV_LOG(ERR,
4939                                 "ethdev bus specific initialisation failed\n");
4940                         goto probe_failed;
4941                 }
4942         }
4943
4944         retval = ethdev_init(ethdev, init_params);
4945         if (retval) {
4946                 RTE_ETHDEV_LOG(ERR, "ethdev initialisation failed\n");
4947                 goto probe_failed;
4948         }
4949
4950         rte_eth_dev_probing_finish(ethdev);
4951
4952         return retval;
4953
4954 probe_failed:
4955         rte_eth_dev_release_port(ethdev);
4956         return retval;
4957 }
4958
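/*
 * Illustrative usage sketch, not part of this file: a bus probe callback
 * delegating allocation, private data and state handling to
 * rte_eth_dev_create(). The my_priv and my_ethdev_init names are
 * assumptions for the example only.
 *
 *	static int
 *	my_ethdev_init(struct rte_eth_dev *ethdev, void *init_params)
 *	{
 *		RTE_SET_USED(init_params);
 *		// set ethdev->dev_ops, MAC addresses, offload caps here
 *		return 0;	// non-zero would take the probe_failed path
 *	}
 *
 *	static int
 *	my_probe(struct rte_device *device)
 *	{
 *		return rte_eth_dev_create(device, device->name,
 *				sizeof(struct my_priv),
 *				NULL, NULL,	// no bus specific init step
 *				my_ethdev_init, NULL);
 *	}
 */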
4959 int
4960 rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
4961         ethdev_uninit_t ethdev_uninit)
4962 {
4963         int ret;
4964
4965         ethdev = rte_eth_dev_allocated(ethdev->data->name);
4966         if (!ethdev)
4967                 return -ENODEV;
4968
4969         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL);
4970
4971         ret = ethdev_uninit(ethdev);
4972         if (ret)
4973                 return ret;
4974
4975         return rte_eth_dev_release_port(ethdev);
4976 }
4977
4978 int
4979 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
4980                           int epfd, int op, void *data)
4981 {
4982         uint32_t vec;
4983         struct rte_eth_dev *dev;
4984         struct rte_intr_handle *intr_handle;
4985         int rc;
4986
4987         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4988         dev = &rte_eth_devices[port_id];
4989
4990         if (queue_id >= dev->data->nb_rx_queues) {
4991                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4992                 return -EINVAL;
4993         }
4994
4995         if (!dev->intr_handle) {
4996                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4997                 return -ENOTSUP;
4998         }
4999
5000         intr_handle = dev->intr_handle;
5001         if (!intr_handle->intr_vec) {
5002                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
5003                 return -EPERM;
5004         }
5005
5006         vec = intr_handle->intr_vec[queue_id];
5007         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
5008         if (rc && rc != -EEXIST) {
5009                 RTE_ETHDEV_LOG(ERR,
5010                         "p %u q %u rx ctl error op %d epfd %d vec %u\n",
5011                         port_id, queue_id, op, epfd, vec);
5012                 return rc;
5013         }
5014
5015         return 0;
5016 }
5017
5018 int
5019 rte_eth_dev_rx_intr_enable(uint16_t port_id,
5020                            uint16_t queue_id)
5021 {
5022         struct rte_eth_dev *dev;
5023         int ret;
5024
5025         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5026         dev = &rte_eth_devices[port_id];
5027
5028         ret = eth_dev_validate_rx_queue(dev, queue_id);
5029         if (ret != 0)
5030                 return ret;
5031
5032         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
5033         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id));
5034 }
5035
5036 int
5037 rte_eth_dev_rx_intr_disable(uint16_t port_id,
5038                             uint16_t queue_id)
5039 {
5040         struct rte_eth_dev *dev;
5041         int ret;
5042
5043         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5044         dev = &rte_eth_devices[port_id];
5045
5046         ret = eth_dev_validate_rx_queue(dev, queue_id);
5047         if (ret != 0)
5048                 return ret;
5049
5050         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
5051         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id));
5052 }
5053
5054
5055 const struct rte_eth_rxtx_callback *
5056 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
5057                 rte_rx_callback_fn fn, void *user_param)
5058 {
5059 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5060         rte_errno = ENOTSUP;
5061         return NULL;
5062 #endif
5063         struct rte_eth_dev *dev;
5064
5065         /* check input parameters */
5066         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
5067                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
5068                 rte_errno = EINVAL;
5069                 return NULL;
5070         }
5071         dev = &rte_eth_devices[port_id];
5072         if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
5073                 rte_errno = EINVAL;
5074                 return NULL;
5075         }
5076         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
5077
5078         if (cb == NULL) {
5079                 rte_errno = ENOMEM;
5080                 return NULL;
5081         }
5082
5083         cb->fn.rx = fn;
5084         cb->param = user_param;
5085
5086         rte_spinlock_lock(&eth_dev_rx_cb_lock);
5087         /* Add the callback in FIFO order. */
5088         struct rte_eth_rxtx_callback *tail =
5089                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
5090
5091         if (!tail) {
5092                 /* Stores to cb->fn and cb->param should complete before
5093                  * cb is visible to data plane.
5094                  */
5095                 __atomic_store_n(
5096                         &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
5097                         cb, __ATOMIC_RELEASE);
5098
5099         } else {
5100                 while (tail->next)
5101                         tail = tail->next;
5102                 /* Stores to cb->fn and cb->param should complete before
5103                  * cb is visible to data plane.
5104                  */
5105                 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
5106         }
5107         rte_spinlock_unlock(&eth_dev_rx_cb_lock);
5108
5109         return cb;
5110 }
5111
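/*
 * Illustrative usage sketch, not part of this file: counting received
 * packets with a post-Rx callback. The counter and the port/queue ids
 * are assumptions for the example only.
 *
 *	static uint16_t
 *	count_cb(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[],
 *		 uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
 *	{
 *		RTE_SET_USED(port_id);
 *		RTE_SET_USED(queue);
 *		RTE_SET_USED(pkts);
 *		RTE_SET_USED(max_pkts);
 *		*(uint64_t *)user_param += nb_pkts;
 *		return nb_pkts;	// keep the whole burst
 *	}
 *
 *	static uint64_t rx_count;
 *	const struct rte_eth_rxtx_callback *cb =
 *		rte_eth_add_rx_callback(port, queue, count_cb, &rx_count);
 */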
5112 const struct rte_eth_rxtx_callback *
5113 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
5114                 rte_rx_callback_fn fn, void *user_param)
5115 {
5116 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5117         rte_errno = ENOTSUP;
5118         return NULL;
5119 #endif
5120         /* check input parameters */
5121         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
5122                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
5123                 rte_errno = EINVAL;
5124                 return NULL;
5125         }
5126
5127         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
5128
5129         if (cb == NULL) {
5130                 rte_errno = ENOMEM;
5131                 return NULL;
5132         }
5133
5134         cb->fn.rx = fn;
5135         cb->param = user_param;
5136
5137         rte_spinlock_lock(&eth_dev_rx_cb_lock);
5138         /* Add the callback at the first position */
5139         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
5140         /* Stores to cb->fn, cb->param and cb->next should complete before
5141          * cb is visible to data plane threads.
5142          */
5143         __atomic_store_n(
5144                 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
5145                 cb, __ATOMIC_RELEASE);
5146         rte_spinlock_unlock(&eth_dev_rx_cb_lock);
5147
5148         return cb;
5149 }
5150
5151 const struct rte_eth_rxtx_callback *
5152 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
5153                 rte_tx_callback_fn fn, void *user_param)
5154 {
5155 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5156         rte_errno = ENOTSUP;
5157         return NULL;
5158 #endif
5159         struct rte_eth_dev *dev;
5160
5161         /* check input parameters */
5162         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
5163                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
5164                 rte_errno = EINVAL;
5165                 return NULL;
5166         }
5167
5168         dev = &rte_eth_devices[port_id];
5169         if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
5170                 rte_errno = EINVAL;
5171                 return NULL;
5172         }
5173
5174         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
5175
5176         if (cb == NULL) {
5177                 rte_errno = ENOMEM;
5178                 return NULL;
5179         }
5180
5181         cb->fn.tx = fn;
5182         cb->param = user_param;
5183
5184         rte_spinlock_lock(&eth_dev_tx_cb_lock);
5185         /* Add the callback in FIFO order. */
5186         struct rte_eth_rxtx_callback *tail =
5187                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
5188
5189         if (!tail) {
5190                 /* Stores to cb->fn and cb->param should complete before
5191                  * cb is visible to data plane.
5192                  */
5193                 __atomic_store_n(
5194                         &rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id],
5195                         cb, __ATOMIC_RELEASE);
5196
5197         } else {
5198                 while (tail->next)
5199                         tail = tail->next;
5200                 /* Stores to cb->fn and cb->param should complete before
5201                  * cb is visible to data plane.
5202                  */
5203                 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
5204         }
5205         rte_spinlock_unlock(&eth_dev_tx_cb_lock);
5206
5207         return cb;
5208 }
5209
5210 int
5211 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
5212                 const struct rte_eth_rxtx_callback *user_cb)
5213 {
5214 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5215         return -ENOTSUP;
5216 #endif
5217         /* Check input parameters. */
5218         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5219         if (user_cb == NULL ||
5220                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
5221                 return -EINVAL;
5222
5223         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
5224         struct rte_eth_rxtx_callback *cb;
5225         struct rte_eth_rxtx_callback **prev_cb;
5226         int ret = -EINVAL;
5227
5228         rte_spinlock_lock(&eth_dev_rx_cb_lock);
5229         prev_cb = &dev->post_rx_burst_cbs[queue_id];
5230         for (; *prev_cb != NULL; prev_cb = &cb->next) {
5231                 cb = *prev_cb;
5232                 if (cb == user_cb) {
5233                         /* Remove the user cb from the callback list. */
5234                         __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
5235                         ret = 0;
5236                         break;
5237                 }
5238         }
5239         rte_spinlock_unlock(&eth_dev_rx_cb_lock);
5240
5241         return ret;
5242 }
5243
5244 int
5245 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
5246                 const struct rte_eth_rxtx_callback *user_cb)
5247 {
5248 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5249         return -ENOTSUP;
5250 #endif
5251         /* Check input parameters. */
5252         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5253         if (user_cb == NULL ||
5254                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
5255                 return -EINVAL;
5256
5257         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
5258         int ret = -EINVAL;
5259         struct rte_eth_rxtx_callback *cb;
5260         struct rte_eth_rxtx_callback **prev_cb;
5261
5262         rte_spinlock_lock(&eth_dev_tx_cb_lock);
5263         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
5264         for (; *prev_cb != NULL; prev_cb = &cb->next) {
5265                 cb = *prev_cb;
5266                 if (cb == user_cb) {
5267                         /* Remove the user cb from the callback list. */
5268                         __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
5269                         ret = 0;
5270                         break;
5271                 }
5272         }
5273         rte_spinlock_unlock(&eth_dev_tx_cb_lock);
5274
5275         return ret;
5276 }
5277
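/*
 * Note on the two remove functions above: the relaxed store only unlinks
 * the callback; the memory is not freed here because a data plane thread
 * may still be executing it. A freeing sketch under that assumption (the
 * port, queue and cb names are for the example only):
 *
 *	if (rte_eth_remove_rx_callback(port, queue, cb) == 0) {
 *		// wait until no lcore can still be inside rte_eth_rx_burst()
 *		// for this queue, e.g. by synchronizing with the pollers
 *		rte_free((void *)(uintptr_t)cb);
 *	}
 */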
5278 int
5279 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5280         struct rte_eth_rxq_info *qinfo)
5281 {
5282         struct rte_eth_dev *dev;
5283
5284         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5285         dev = &rte_eth_devices[port_id];
5286
5287         if (queue_id >= dev->data->nb_rx_queues) {
5288                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
5289                 return -EINVAL;
5290         }
5291
5292         if (qinfo == NULL) {
5293                 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Rx queue %u info to NULL\n",
5294                         port_id, queue_id);
5295                 return -EINVAL;
5296         }
5297
5298         if (dev->data->rx_queues == NULL ||
5299                         dev->data->rx_queues[queue_id] == NULL) {
5300                 RTE_ETHDEV_LOG(ERR,
5301                                "Rx queue %"PRIu16" of device with port_id=%"
5302                                PRIu16" has not been setup\n",
5303                                queue_id, port_id);
5304                 return -EINVAL;
5305         }
5306
5307         if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
5308                 RTE_ETHDEV_LOG(INFO,
5309                         "Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
5310                         queue_id, port_id);
5311                 return -EINVAL;
5312         }
5313
5314         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
5315
5316         memset(qinfo, 0, sizeof(*qinfo));
5317         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
5318         qinfo->queue_state = dev->data->rx_queue_state[queue_id];
5319
5320         return 0;
5321 }
5322
5323 int
5324 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5325         struct rte_eth_txq_info *qinfo)
5326 {
5327         struct rte_eth_dev *dev;
5328
5329         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5330         dev = &rte_eth_devices[port_id];
5331
5332         if (queue_id >= dev->data->nb_tx_queues) {
5333                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
5334                 return -EINVAL;
5335         }
5336
5337         if (qinfo == NULL) {
5338                 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Tx queue %u info to NULL\n",
5339                         port_id, queue_id);
5340                 return -EINVAL;
5341         }
5342
5343         if (dev->data->tx_queues == NULL ||
5344                         dev->data->tx_queues[queue_id] == NULL) {
5345                 RTE_ETHDEV_LOG(ERR,
5346                                "Tx queue %"PRIu16" of device with port_id=%"
5347                                PRIu16" has not been setup\n",
5348                                queue_id, port_id);
5349                 return -EINVAL;
5350         }
5351
5352         if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
5353                 RTE_ETHDEV_LOG(INFO,
5354                         "Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
5355                         queue_id, port_id);
5356                 return -EINVAL;
5357         }
5358
5359         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
5360
5361         memset(qinfo, 0, sizeof(*qinfo));
5362         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
5363         qinfo->queue_state = dev->data->tx_queue_state[queue_id];
5364
5365         return 0;
5366 }
5367
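/*
 * Illustrative usage sketch, not part of this file: reading back the
 * actual parameters of a configured Rx queue. The port and queue ids are
 * assumptions for the example only.
 *
 *	struct rte_eth_rxq_info qinfo;
 *
 *	if (rte_eth_rx_queue_info_get(port, queue, &qinfo) == 0)
 *		printf("rxq %u: %u descriptors, mempool %s\n",
 *		       queue, qinfo.nb_desc, qinfo.mp->name);
 */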
5368 int
5369 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5370                           struct rte_eth_burst_mode *mode)
5371 {
5372         struct rte_eth_dev *dev;
5373
5374         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5375         dev = &rte_eth_devices[port_id];
5376
5377         if (queue_id >= dev->data->nb_rx_queues) {
5378                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
5379                 return -EINVAL;
5380         }
5381
5382         if (mode == NULL) {
5383                 RTE_ETHDEV_LOG(ERR,
5384                         "Cannot get ethdev port %u Rx queue %u burst mode to NULL\n",
5385                         port_id, queue_id);
5386                 return -EINVAL;
5387         }
5388
5389         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP);
5390         memset(mode, 0, sizeof(*mode));
5391         return eth_err(port_id,
5392                        dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode));
5393 }
5394
5395 int
5396 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5397                           struct rte_eth_burst_mode *mode)
5398 {
5399         struct rte_eth_dev *dev;
5400
5401         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5402         dev = &rte_eth_devices[port_id];
5403
5404         if (queue_id >= dev->data->nb_tx_queues) {
5405                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
5406                 return -EINVAL;
5407         }
5408
5409         if (mode == NULL) {
5410                 RTE_ETHDEV_LOG(ERR,
5411                         "Cannot get ethdev port %u Tx queue %u burst mode to NULL\n",
5412                         port_id, queue_id);
5413                 return -EINVAL;
5414         }
5415
5416         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP);
5417         memset(mode, 0, sizeof(*mode));
5418         return eth_err(port_id,
5419                        dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode));
5420 }
5421
5422 int
5423 rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id,
5424                 struct rte_power_monitor_cond *pmc)
5425 {
5426         struct rte_eth_dev *dev;
5427
5428         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5429         dev = &rte_eth_devices[port_id];
5430
5431         if (queue_id >= dev->data->nb_rx_queues) {
5432                 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
5433                 return -EINVAL;
5434         }
5435
5436         if (pmc == NULL) {
5437                 RTE_ETHDEV_LOG(ERR,
5438                         "Cannot get ethdev port %u Rx queue %u power monitor condition to NULL\n",
5439                         port_id, queue_id);
5440                 return -EINVAL;
5441         }
5442
5443         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_monitor_addr, -ENOTSUP);
5444         return eth_err(port_id,
5445                 dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id], pmc));
5446 }
5447
5448 int
5449 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
5450                              struct rte_ether_addr *mc_addr_set,
5451                              uint32_t nb_mc_addr)
5452 {
5453         struct rte_eth_dev *dev;
5454
5455         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5456         dev = &rte_eth_devices[port_id];
5457
5458         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
5459         return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
5460                                                 mc_addr_set, nb_mc_addr));
5461 }
5462
5463 int
5464 rte_eth_timesync_enable(uint16_t port_id)
5465 {
5466         struct rte_eth_dev *dev;
5467
5468         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5469         dev = &rte_eth_devices[port_id];
5470
5471         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
5472         return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
5473 }
5474
5475 int
5476 rte_eth_timesync_disable(uint16_t port_id)
5477 {
5478         struct rte_eth_dev *dev;
5479
5480         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5481         dev = &rte_eth_devices[port_id];
5482
5483         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
5484         return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
5485 }
5486
5487 int
5488 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
5489                                    uint32_t flags)
5490 {
5491         struct rte_eth_dev *dev;
5492
5493         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5494         dev = &rte_eth_devices[port_id];
5495
5496         if (timestamp == NULL) {
5497                 RTE_ETHDEV_LOG(ERR,
5498                         "Cannot read ethdev port %u Rx timestamp to NULL\n",
5499                         port_id);
5500                 return -EINVAL;
5501         }
5502
5503         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
5504         return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
5505                                 (dev, timestamp, flags));
5506 }
5507
5508 int
5509 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
5510                                    struct timespec *timestamp)
5511 {
5512         struct rte_eth_dev *dev;
5513
5514         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5515         dev = &rte_eth_devices[port_id];
5516
5517         if (timestamp == NULL) {
5518                 RTE_ETHDEV_LOG(ERR,
5519                         "Cannot read ethdev port %u Tx timestamp to NULL\n",
5520                         port_id);
5521                 return -EINVAL;
5522         }
5523
5524         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
5525         return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
5526                                 (dev, timestamp));
5527 }
5528
5529 int
5530 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
5531 {
5532         struct rte_eth_dev *dev;
5533
5534         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5535         dev = &rte_eth_devices[port_id];
5536
5537         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
5538         return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, delta));
5539 }
5540
5541 int
5542 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
5543 {
5544         struct rte_eth_dev *dev;
5545
5546         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5547         dev = &rte_eth_devices[port_id];
5548
5549         if (timestamp == NULL) {
5550                 RTE_ETHDEV_LOG(ERR,
5551                         "Cannot read ethdev port %u timesync time to NULL\n",
5552                         port_id);
5553                 return -EINVAL;
5554         }
5555
5556         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
5557         return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
5558                                                                 timestamp));
5559 }
5560
5561 int
5562 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
5563 {
5564         struct rte_eth_dev *dev;
5565
5566         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5567         dev = &rte_eth_devices[port_id];
5568
5569         if (timestamp == NULL) {
5570                 RTE_ETHDEV_LOG(ERR,
5571                         "Cannot write ethdev port %u timesync from NULL time\n",
5572                         port_id);
5573                 return -EINVAL;
5574         }
5575
5576         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
5577         return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
5578                                                                 timestamp));
5579 }
5580
5581 int
5582 rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
5583 {
5584         struct rte_eth_dev *dev;
5585
5586         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5587         dev = &rte_eth_devices[port_id];
5588
5589         if (clock == NULL) {
5590                 RTE_ETHDEV_LOG(ERR, "Cannot read ethdev port %u clock to NULL\n",
5591                         port_id);
5592                 return -EINVAL;
5593         }
5594
5595         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP);
5596         return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock));
5597 }
5598
5599 int
5600 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
5601 {
5602         struct rte_eth_dev *dev;
5603
5604         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5605         dev = &rte_eth_devices[port_id];
5606
5607         if (info == NULL) {
5608                 RTE_ETHDEV_LOG(ERR,
5609                         "Cannot get ethdev port %u register info to NULL\n",
5610                         port_id);
5611                 return -EINVAL;
5612         }
5613
5614         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
5615         return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
5616 }
5617
5618 int
5619 rte_eth_dev_get_eeprom_length(uint16_t port_id)
5620 {
5621         struct rte_eth_dev *dev;
5622
5623         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5624         dev = &rte_eth_devices[port_id];
5625
5626         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
5627         return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
5628 }
5629
5630 int
5631 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
5632 {
5633         struct rte_eth_dev *dev;
5634
5635         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5636         dev = &rte_eth_devices[port_id];
5637
5638         if (info == NULL) {
5639                 RTE_ETHDEV_LOG(ERR,
5640                         "Cannot get ethdev port %u EEPROM info to NULL\n",
5641                         port_id);
5642                 return -EINVAL;
5643         }
5644
5645         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
5646         return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
5647 }
5648
5649 int
5650 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
5651 {
5652         struct rte_eth_dev *dev;
5653
5654         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5655         dev = &rte_eth_devices[port_id];
5656
5657         if (info == NULL) {
5658                 RTE_ETHDEV_LOG(ERR,
5659                         "Cannot set ethdev port %u EEPROM from NULL info\n",
5660                         port_id);
5661                 return -EINVAL;
5662         }
5663
5664         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
5665         return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
5666 }
5667
5668 int
5669 rte_eth_dev_get_module_info(uint16_t port_id,
5670                             struct rte_eth_dev_module_info *modinfo)
5671 {
5672         struct rte_eth_dev *dev;
5673
5674         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5675         dev = &rte_eth_devices[port_id];
5676
5677         if (modinfo == NULL) {
5678                 RTE_ETHDEV_LOG(ERR,
5679                         "Cannot get ethdev port %u EEPROM module info to NULL\n",
5680                         port_id);
5681                 return -EINVAL;
5682         }
5683
5684         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP);
5685         return (*dev->dev_ops->get_module_info)(dev, modinfo);
5686 }
5687
5688 int
5689 rte_eth_dev_get_module_eeprom(uint16_t port_id,
5690                               struct rte_dev_eeprom_info *info)
5691 {
5692         struct rte_eth_dev *dev;
5693
5694         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5695         dev = &rte_eth_devices[port_id];
5696
5697         if (info == NULL) {
5698                 RTE_ETHDEV_LOG(ERR,
5699                         "Cannot get ethdev port %u module EEPROM info to NULL\n",
5700                         port_id);
5701                 return -EINVAL;
5702         }
5703
5704         if (info->data == NULL) {
5705                 RTE_ETHDEV_LOG(ERR,
5706                         "Cannot get ethdev port %u module EEPROM data to NULL\n",
5707                         port_id);
5708                 return -EINVAL;
5709         }
5710
5711         if (info->length == 0) {
5712                 RTE_ETHDEV_LOG(ERR,
5713                         "Cannot get ethdev port %u module EEPROM to data with zero size\n",
5714                         port_id);
5715                 return -EINVAL;
5716         }
5717
5718         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP);
5719         return (*dev->dev_ops->get_module_eeprom)(dev, info);
5720 }
5721
5722 int
5723 rte_eth_dev_get_dcb_info(uint16_t port_id,
5724                              struct rte_eth_dcb_info *dcb_info)
5725 {
5726         struct rte_eth_dev *dev;
5727
5728         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5729         dev = &rte_eth_devices[port_id];
5730
5731         if (dcb_info == NULL) {
5732                 RTE_ETHDEV_LOG(ERR,
5733                         "Cannot get ethdev port %u DCB info to NULL\n",
5734                         port_id);
5735                 return -EINVAL;
5736         }
5737
5738         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
5739
5740         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
5741         return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
5742 }
5743
5744 static void
5745 eth_dev_adjust_nb_desc(uint16_t *nb_desc,
5746                 const struct rte_eth_desc_lim *desc_lim)
5747 {
5748         if (desc_lim->nb_align != 0)
5749                 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
5750
5751         if (desc_lim->nb_max != 0)
5752                 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
5753
5754         *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
5755 }
5756
5757 int
5758 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
5759                                  uint16_t *nb_rx_desc,
5760                                  uint16_t *nb_tx_desc)
5761 {
5762         struct rte_eth_dev_info dev_info;
5763         int ret;
5764
5765         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5766
5767         ret = rte_eth_dev_info_get(port_id, &dev_info);
5768         if (ret != 0)
5769                 return ret;
5770
5771         if (nb_rx_desc != NULL)
5772                 eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
5773
5774         if (nb_tx_desc != NULL)
5775                 eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
5776
5777         return 0;
5778 }
5779
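/*
 * Illustrative usage sketch, not part of this file: clamping requested
 * ring sizes to the device limits so that the later queue setup calls
 * cannot fail on descriptor counts. The 4096/1024 starting values and
 * mb_pool are assumptions for the example only.
 *
 *	uint16_t nb_rxd = 4096;
 *	uint16_t nb_txd = 1024;
 *
 *	int ret = rte_eth_dev_adjust_nb_rx_tx_desc(port, &nb_rxd, &nb_txd);
 *	if (ret == 0)
 *		ret = rte_eth_rx_queue_setup(port, 0, nb_rxd,
 *				rte_eth_dev_socket_id(port), NULL, mb_pool);
 */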
5780 int
5781 rte_eth_dev_hairpin_capability_get(uint16_t port_id,
5782                                    struct rte_eth_hairpin_cap *cap)
5783 {
5784         struct rte_eth_dev *dev;
5785
5786         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5787         dev = &rte_eth_devices[port_id];
5788
5789         if (cap == NULL) {
5790                 RTE_ETHDEV_LOG(ERR,
5791                         "Cannot get ethdev port %u hairpin capability to NULL\n",
5792                         port_id);
5793                 return -EINVAL;
5794         }
5795
5796         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP);
5797         memset(cap, 0, sizeof(*cap));
5798         return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap));
5799 }
5800
5801 int
5802 rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
5803 {
5804         if (dev->data->rx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN)
5805                 return 1;
5806         return 0;
5807 }
5808
5809 int
5810 rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
5811 {
5812         if (dev->data->tx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN)
5813                 return 1;
5814         return 0;
5815 }
5816
5817 int
5818 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
5819 {
5820         struct rte_eth_dev *dev;
5821
5822         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5823         dev = &rte_eth_devices[port_id];
5824
5825         if (pool == NULL) {
5826                 RTE_ETHDEV_LOG(ERR,
5827                         "Cannot test ethdev port %u mempool operation from NULL pool\n",
5828                         port_id);
5829                 return -EINVAL;
5830         }
5831
5832         if (*dev->dev_ops->pool_ops_supported == NULL)
5833                 return 1; /* all pools are supported */
5834
5835         return (*dev->dev_ops->pool_ops_supported)(dev, pool);
5836 }
5837
5838 /**
5839  * A set of values to describe the possible states of a switch domain.
5840  */
5841 enum rte_eth_switch_domain_state {
5842         RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
5843         RTE_ETH_SWITCH_DOMAIN_ALLOCATED
5844 };
5845
5846 /**
5847  * Array of switch domains available for allocation. Array is sized to
5848  * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
5849  * ethdev ports in a single process.
5850  */
5851 static struct rte_eth_dev_switch {
5852         enum rte_eth_switch_domain_state state;
5853 } eth_dev_switch_domains[RTE_MAX_ETHPORTS];
5854
5855 int
5856 rte_eth_switch_domain_alloc(uint16_t *domain_id)
5857 {
5858         uint16_t i;
5859
5860         *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
5861
5862         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
5863                 if (eth_dev_switch_domains[i].state ==
5864                         RTE_ETH_SWITCH_DOMAIN_UNUSED) {
5865                         eth_dev_switch_domains[i].state =
5866                                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
5867                         *domain_id = i;
5868                         return 0;
5869                 }
5870         }
5871
5872         return -ENOSPC;
5873 }
5874
5875 int
5876 rte_eth_switch_domain_free(uint16_t domain_id)
5877 {
5878         if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
5879                 domain_id >= RTE_MAX_ETHPORTS)
5880                 return -EINVAL;
5881
5882         if (eth_dev_switch_domains[domain_id].state !=
5883                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
5884                 return -EINVAL;
5885
5886         eth_dev_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;
5887
5888         return 0;
5889 }
5890
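/*
 * Illustrative usage sketch, not part of this file: a PMD grouping a PF
 * and its representors under one switch domain. Storing the id and
 * reporting it through dev_infos_get() is the driver's responsibility;
 * the identifiers below are assumptions for the example only.
 *
 *	uint16_t domain_id;
 *
 *	if (rte_eth_switch_domain_alloc(&domain_id) != 0)
 *		return -ENOSPC;
 *	// every port of this switch then reports the same id:
 *	//	dev_info->switch_info.domain_id = domain_id;
 *	// and the last port to close releases it:
 *	//	rte_eth_switch_domain_free(domain_id);
 */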
5891 static int
5892 eth_dev_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
5893 {
5894         int state;
5895         struct rte_kvargs_pair *pair;
5896         char *letter;
5897
5898         arglist->str = strdup(str_in);
5899         if (arglist->str == NULL)
5900                 return -ENOMEM;
5901
5902         letter = arglist->str;
5903         state = 0;
5904         arglist->count = 0;
5905         pair = &arglist->pairs[0];
5906         while (1) {
5907                 switch (state) {
5908                 case 0: /* Initial */
5909                         if (*letter == '=')
5910                                 return -EINVAL;
5911                         else if (*letter == '\0')
5912                                 return 0;
5913
5914                         state = 1;
5915                         pair->key = letter;
5916                         /* fall-thru */
5917
5918                 case 1: /* Parsing key */
5919                         if (*letter == '=') {
5920                                 *letter = '\0';
5921                                 pair->value = letter + 1;
5922                                 state = 2;
5923                         } else if (*letter == ',' || *letter == '\0')
5924                                 return -EINVAL;
5925                         break;
5926
5927
5928                 case 2: /* Parsing value */
5929                         if (*letter == '[')
5930                                 state = 3;
5931                         else if (*letter == ',') {
5932                                 *letter = '\0';
5933                                 arglist->count++;
5934                                 pair = &arglist->pairs[arglist->count];
5935                                 state = 0;
5936                         } else if (*letter == '\0') {
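                                /* Step back so the unconditional letter++
                                 * below re-reads the terminating NUL and
                                 * state 0 ends the parse after this last
                                 * pair is counted.
                                 */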
5937                                 letter--;
5938                                 arglist->count++;
5939                                 pair = &arglist->pairs[arglist->count];
5940                                 state = 0;
5941                         }
5942                         break;
5943
5944                 case 3: /* Parsing list */
5945                         if (*letter == ']')
5946                                 state = 2;
5947                         else if (*letter == '\0')
5948                                 return -EINVAL;
5949                         break;
5950                 }
5951                 letter++;
5952         }
5953 }
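
/*
 * Tokenising example (illustrative input): the string
 *
 *	"representor=[0-3],foo=bar"
 *
 * yields the pairs ("representor", "[0-3]") and ("foo", "bar"). The bracket
 * state (case 3) keeps the commas inside a list value from being treated as
 * pair separators.
 */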

int
rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
{
	struct rte_kvargs args;
	struct rte_kvargs_pair *pair;
	unsigned int i;
	int result = 0;

	memset(eth_da, 0, sizeof(*eth_da));

	result = eth_dev_devargs_tokenise(&args, dargs);
	if (result < 0)
		goto parse_cleanup;

	for (i = 0; i < args.count; i++) {
		pair = &args.pairs[i];
		if (strcmp("representor", pair->key) == 0) {
			if (eth_da->type != RTE_ETH_REPRESENTOR_NONE) {
				RTE_ETHDEV_LOG(ERR,
					"Duplicated representor key: %s\n",
					dargs);
				result = -1;
				goto parse_cleanup;
			}
			result = rte_eth_devargs_parse_representor_ports(
					pair->value, eth_da);
			if (result < 0)
				goto parse_cleanup;
		}
	}

parse_cleanup:
	if (args.str)
		free(args.str);

	return result;
}
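
/*
 * Usage sketch (assumed argument string): parsing a typical representor
 * devargs fills the caller-provided structure in one call.
 *
 *	struct rte_eth_devargs da;
 *
 *	if (rte_eth_devargs_parse("representor=[0,2-3]", &da) == 0)
 *		... da.nb_representor_ports is 3: ports 0, 2 and 3 ...
 */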

int
rte_eth_representor_id_get(const struct rte_eth_dev *ethdev,
			   enum rte_eth_representor_type type,
			   int controller, int pf, int representor_port,
			   uint16_t *repr_id)
{
	int ret, n, count;
	uint32_t i;
	struct rte_eth_representor_info *info = NULL;
	size_t size;

	if (type == RTE_ETH_REPRESENTOR_NONE)
		return 0;
	if (repr_id == NULL)
		return -EINVAL;

	/* Get PMD representor range info. */
	ret = rte_eth_representor_info_get(ethdev->data->port_id, NULL);
	if (ret == -ENOTSUP && type == RTE_ETH_REPRESENTOR_VF &&
	    controller == -1 && pf == -1) {
		/* Direct mapping for legacy VF representor. */
		*repr_id = representor_port;
		return 0;
	} else if (ret < 0) {
		return ret;
	}
	n = ret;
	size = sizeof(*info) + n * sizeof(info->ranges[0]);
	info = calloc(1, size);
	if (info == NULL)
		return -ENOMEM;
	info->nb_ranges_alloc = n;
	ret = rte_eth_representor_info_get(ethdev->data->port_id, info);
	if (ret < 0)
		goto out;

	/* Default controller and pf to the backing device's own values. */
	if (controller == -1)
		controller = info->controller;
	if (pf == -1)
		pf = info->pf;

	/* Locate representor ID. */
	ret = -ENOENT;
	for (i = 0; i < info->nb_ranges; ++i) {
		if (info->ranges[i].type != type)
			continue;
		if (info->ranges[i].controller != controller)
			continue;
		if (info->ranges[i].id_end < info->ranges[i].id_base) {
			RTE_ETHDEV_LOG(WARNING,
				"Port %hu invalid representor ID range %u - %u, entry %u\n",
				ethdev->data->port_id, info->ranges[i].id_base,
				info->ranges[i].id_end, i);
			continue;
		}
		count = info->ranges[i].id_end - info->ranges[i].id_base + 1;
		switch (info->ranges[i].type) {
		case RTE_ETH_REPRESENTOR_PF:
			if (pf < info->ranges[i].pf ||
			    pf >= info->ranges[i].pf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
				   (pf - info->ranges[i].pf);
			ret = 0;
			goto out;
		case RTE_ETH_REPRESENTOR_VF:
			if (info->ranges[i].pf != pf)
				continue;
			if (representor_port < info->ranges[i].vf ||
			    representor_port >= info->ranges[i].vf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
				   (representor_port - info->ranges[i].vf);
			ret = 0;
			goto out;
		case RTE_ETH_REPRESENTOR_SF:
			if (info->ranges[i].pf != pf)
				continue;
			if (representor_port < info->ranges[i].sf ||
			    representor_port >= info->ranges[i].sf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
				   (representor_port - info->ranges[i].sf);
			ret = 0;
			goto out;
		default:
			break;
		}
	}
out:
	free(info);
	return ret;
}
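
/*
 * Worked example (assumed numbers): with a VF range of
 * { type = VF, pf = 0, vf = 0, id_base = 5, id_end = 12 }, count is
 * 12 - 5 + 1 = 8, so representor_port 3 falls inside [0, 8) and maps to
 * repr_id = 5 + (3 - 0) = 8.
 */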

static int
eth_dev_handle_port_list(const char *cmd __rte_unused,
		const char *params __rte_unused,
		struct rte_tel_data *d)
{
	int port_id;

	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
	RTE_ETH_FOREACH_DEV(port_id)
		rte_tel_data_add_array_int(d, port_id);
	return 0;
}

static void
eth_dev_add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats,
		const char *stat_name)
{
	int q;
	struct rte_tel_data *q_data = rte_tel_data_alloc();

	/* Allocation can fail; skip the per-queue array in that case. */
	if (q_data == NULL)
		return;
	rte_tel_data_start_array(q_data, RTE_TEL_U64_VAL);
	for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++)
		rte_tel_data_add_array_u64(q_data, q_stats[q]);
	rte_tel_data_add_dict_container(d, stat_name, q_data, 0);
}

/* Add one top-level counter from the local 'stats' variable to dict 'd'. */
#define ADD_DICT_STAT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s)

static int
eth_dev_handle_port_stats(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	struct rte_eth_stats stats;
	int port_id, ret;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	port_id = atoi(params);
	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	ret = rte_eth_stats_get(port_id, &stats);
	if (ret < 0)
		return -1;

	rte_tel_data_start_dict(d);
	ADD_DICT_STAT(stats, ipackets);
	ADD_DICT_STAT(stats, opackets);
	ADD_DICT_STAT(stats, ibytes);
	ADD_DICT_STAT(stats, obytes);
	ADD_DICT_STAT(stats, imissed);
	ADD_DICT_STAT(stats, ierrors);
	ADD_DICT_STAT(stats, oerrors);
	ADD_DICT_STAT(stats, rx_nombuf);
	eth_dev_add_port_queue_stats(d, stats.q_ipackets, "q_ipackets");
	eth_dev_add_port_queue_stats(d, stats.q_opackets, "q_opackets");
	eth_dev_add_port_queue_stats(d, stats.q_ibytes, "q_ibytes");
	eth_dev_add_port_queue_stats(d, stats.q_obytes, "q_obytes");
	eth_dev_add_port_queue_stats(d, stats.q_errors, "q_errors");

	return 0;
}
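
/*
 * The resulting dict mirrors struct rte_eth_stats: scalar counters at the
 * top level plus one per-queue u64 array per q_* field, e.g. (illustrative)
 *
 *	{"ipackets": 100, ..., "q_ipackets": [100, 0, ...], ...}
 */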

static int
eth_dev_handle_port_xstats(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	struct rte_eth_xstat *eth_xstats;
	struct rte_eth_xstat_name *xstat_names;
	int port_id, num_xstats;
	int i, ret;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	port_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		RTE_ETHDEV_LOG(NOTICE,
			"Extra parameters passed to ethdev telemetry command, ignoring\n");
	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	num_xstats = rte_eth_xstats_get(port_id, NULL, 0);
	if (num_xstats < 0)
		return -1;

	/* use one malloc for both names and stats */
	eth_xstats = malloc((sizeof(struct rte_eth_xstat) +
			sizeof(struct rte_eth_xstat_name)) * num_xstats);
	if (eth_xstats == NULL)
		return -1;
	xstat_names = (void *)&eth_xstats[num_xstats];

	ret = rte_eth_xstats_get_names(port_id, xstat_names, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(eth_xstats);
		return -1;
	}

	ret = rte_eth_xstats_get(port_id, eth_xstats, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(eth_xstats);
		return -1;
	}

	rte_tel_data_start_dict(d);
	for (i = 0; i < num_xstats; i++)
		rte_tel_data_add_dict_u64(d, xstat_names[i].name,
				eth_xstats[i].value);
	/* Names and values are copied into 'd'; release the scratch buffer. */
	free(eth_xstats);
	return 0;
}
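
/*
 * Memory layout note for the handler above: values and names share one
 * allocation, values first,
 *
 *	[rte_eth_xstat x num_xstats][rte_eth_xstat_name x num_xstats]
 *
 * so a single free() releases both halves once the dict has been filled.
 */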

static int
eth_dev_handle_port_link_status(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	static const char *status_str = "status";
	int ret, port_id;
	struct rte_eth_link link;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	port_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		RTE_ETHDEV_LOG(NOTICE,
			"Extra parameters passed to ethdev telemetry command, ignoring\n");
	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	ret = rte_eth_link_get_nowait(port_id, &link);
	if (ret < 0)
		return -1;

	rte_tel_data_start_dict(d);
	if (!link.link_status) {
		rte_tel_data_add_dict_string(d, status_str, "DOWN");
		return 0;
	}
	rte_tel_data_add_dict_string(d, status_str, "UP");
	rte_tel_data_add_dict_u64(d, "speed", link.link_speed);
	rte_tel_data_add_dict_string(d, "duplex",
			(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
				"full-duplex" : "half-duplex");
	return 0;
}
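
/*
 * Illustrative responses: an up port might report
 * {"status": "UP", "speed": 10000, "duplex": "full-duplex"}, while a down
 * port reports only {"status": "DOWN"}.
 */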

int
rte_eth_hairpin_queue_peer_update(uint16_t peer_port, uint16_t peer_queue,
				  struct rte_hairpin_peer_info *cur_info,
				  struct rte_hairpin_peer_info *peer_info,
				  uint32_t direction)
{
	struct rte_eth_dev *dev;

	/* Current queue information is optional, but peer info is required. */
	if (peer_info == NULL)
		return -EINVAL;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[peer_port];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_update,
				-ENOTSUP);

	return (*dev->dev_ops->hairpin_queue_peer_update)(dev, peer_queue,
					cur_info, peer_info, direction);
}

int
rte_eth_hairpin_queue_peer_bind(uint16_t cur_port, uint16_t cur_queue,
				struct rte_hairpin_peer_info *peer_info,
				uint32_t direction)
{
	struct rte_eth_dev *dev;

	if (peer_info == NULL)
		return -EINVAL;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[cur_port];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_bind,
				-ENOTSUP);

	return (*dev->dev_ops->hairpin_queue_peer_bind)(dev, cur_queue,
							peer_info, direction);
}

int
rte_eth_hairpin_queue_peer_unbind(uint16_t cur_port, uint16_t cur_queue,
				  uint32_t direction)
{
	struct rte_eth_dev *dev;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[cur_port];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_unbind,
				-ENOTSUP);

	return (*dev->dev_ops->hairpin_queue_peer_unbind)(dev, cur_queue,
							  direction);
}
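
/*
 * Simplified sketch of the intended call order (an assumption; variable
 * names are hypothetical): a PMD binding hairpin queues across two ports
 * first exchanges peer information, then binds, and unbinds on teardown.
 *
 *	struct rte_hairpin_peer_info cur, peer;
 *
 *	rte_eth_hairpin_queue_peer_update(peer_port, peer_q, &cur, &peer, dir);
 *	rte_eth_hairpin_queue_peer_bind(cur_port, cur_q, &peer, dir);
 *	...
 *	rte_eth_hairpin_queue_peer_unbind(cur_port, cur_q, dir);
 */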

int
rte_eth_representor_info_get(uint16_t port_id,
			     struct rte_eth_representor_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->representor_info_get, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev, info));
}

RTE_LOG_REGISTER_DEFAULT(rte_eth_dev_logtype, INFO);

RTE_INIT(ethdev_init_telemetry)
{
	rte_telemetry_register_cmd("/ethdev/list", eth_dev_handle_port_list,
			"Returns list of available ethdev ports. Takes no parameters");
	rte_telemetry_register_cmd("/ethdev/stats", eth_dev_handle_port_stats,
			"Returns the common stats for a port. Parameters: int port_id");
	rte_telemetry_register_cmd("/ethdev/xstats", eth_dev_handle_port_xstats,
			"Returns the extended stats for a port. Parameters: int port_id");
	rte_telemetry_register_cmd("/ethdev/link_status",
			eth_dev_handle_port_link_status,
			"Returns the link status for a port. Parameters: int port_id");
}
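
/*
 * Usage note: these endpoints are served over the telemetry socket and can
 * be exercised with the dpdk-telemetry.py client shipped in usertools,
 * passing the port ID after a comma, e.g.
 *
 *	$ ./usertools/dpdk-telemetry.py
 *	--> /ethdev/list
 *	--> /ethdev/stats,0
 */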