/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <ctype.h>
#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_kvargs.h>
#include <rte_class.h>
#include <rte_ether.h>
#include <rte_telemetry.h>

#include "rte_ethdev_trace.h"
#include "rte_ethdev.h"
#include "ethdev_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* public fast-path API */
struct rte_eth_fp_ops rte_eth_fp_ops[RTE_MAX_ETHPORTS];

/* spinlock for eth device callbacks */
static rte_spinlock_t eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove Rx callbacks */
static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove Tx callbacks */
static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for shared data allocation */
static rte_spinlock_t eth_dev_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

/* Shared memory between primary and secondary processes. */
static struct {
	uint64_t next_owner_id;
	rte_spinlock_t ownership_lock;
	struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
} *eth_dev_shared_data;

static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
		rx_nombuf)},
};

#define RTE_NB_STATS RTE_DIM(eth_dev_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS RTE_DIM(eth_dev_rxq_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)

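/*
 * Illustrative sketch (not part of the upstream file): the name/offset
 * tables above let generic code read any basic statistic by name. Given
 * a filled rte_eth_stats, the value behind entry i is fetched as:
 *
 *	struct rte_eth_stats stats;
 *	uint64_t val;
 *
 *	rte_eth_stats_get(port_id, &stats);
 *	val = *(uint64_t *)(((char *)&stats) +
 *			eth_dev_stats_strings[i].offset);
 *
 * This is the same pattern the xstats handlers rely on.
 */
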
#define RTE_RX_OFFLOAD_BIT2STR(_name)	\
	{ RTE_ETH_RX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_rx_offload_names[] = {
	RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
	RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
	RTE_RX_OFFLOAD_BIT2STR(SCATTER),
	RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
	RTE_RX_OFFLOAD_BIT2STR(SECURITY),
	RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
	RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
	RTE_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
};

#undef RTE_RX_OFFLOAD_BIT2STR
#undef RTE_ETH_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name)	\
	{ RTE_ETH_TX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_tx_offload_names[] = {
	RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
	RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
	RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
	RTE_TX_OFFLOAD_BIT2STR(SECURITY),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
};

#undef RTE_TX_OFFLOAD_BIT2STR

static const struct {
	uint64_t offload;
	const char *name;
} rte_eth_dev_capa_names[] = {
	{RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP, "RUNTIME_RX_QUEUE_SETUP"},
	{RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP, "RUNTIME_TX_QUEUE_SETUP"},
	{RTE_ETH_DEV_CAPA_RXQ_SHARE, "RXQ_SHARE"},
};

/**
 * The user application callback description.
 *
 * It contains the callback address to be registered by the user
 * application, a pointer to the callback's parameters, and the event type.
 */
struct rte_eth_dev_callback {
	TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
	rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
	void *cb_arg;                           /**< Parameter for callback */
	void *ret_param;                        /**< Return parameter */
	enum rte_eth_event_type event;          /**< Interrupt event type */
	uint32_t active;                        /**< Callback is executing */
};

enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};

int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
	int ret;
	struct rte_devargs devargs;
	const char *bus_param_key;
	char *bus_str = NULL;
	char *cls_str = NULL;
	int str_size;

	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL iterator\n");
		return -EINVAL;
	}

	if (devargs_str == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot initialize iterator from NULL device description string\n");
		return -EINVAL;
	}

	memset(iter, 0, sizeof(*iter));
	memset(&devargs, 0, sizeof(devargs));

	/*
	 * The devargs string may use various syntaxes:
	 *   - 0000:08:00.0,representor=[1-3]
	 *   - pci:0000:06:00.0,representor=[0,5]
	 *   - class=eth,mac=00:11:22:33:44:55
	 *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
	 */

	/*
	 * Handle the pure class filter case (i.e. without any bus-level
	 * argument), coming from the future new syntax.
	 * rte_devargs_parse() does not yet support the new syntax,
	 * which is why this simple case is parsed here for now.
	 */
#define iter_anybus_str "class=eth,"
	if (strncmp(devargs_str, iter_anybus_str,
			strlen(iter_anybus_str)) == 0) {
		iter->cls_str = devargs_str + strlen(iter_anybus_str);
		goto end;
	}

	/* Split bus, device and parameters. */
	ret = rte_devargs_parse(&devargs, devargs_str);
	if (ret != 0)
		goto error;

	/*
	 * Assume parameters of old syntax can match only at ethdev level.
	 * Extra parameters will be ignored, thanks to "+" prefix.
	 */
	str_size = strlen(devargs.args) + 2;
	cls_str = malloc(str_size);
	if (cls_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(cls_str, str_size, "+%s", devargs.args);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->cls_str = cls_str;

	iter->bus = devargs.bus;
	if (iter->bus->dev_iterate == NULL) {
		ret = -ENOTSUP;
		goto error;
	}

	/* Convert bus args to new syntax for use with new API dev_iterate. */
	if ((strcmp(iter->bus->name, "vdev") == 0) ||
		(strcmp(iter->bus->name, "fslmc") == 0) ||
		(strcmp(iter->bus->name, "dpaa_bus") == 0)) {
		bus_param_key = "name";
	} else if (strcmp(iter->bus->name, "pci") == 0) {
		bus_param_key = "addr";
	} else {
		ret = -ENOTSUP;
		goto error;
	}
	str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
	bus_str = malloc(str_size);
	if (bus_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(bus_str, str_size, "%s=%s",
			bus_param_key, devargs.name);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->bus_str = bus_str;

end:
	iter->cls = rte_class_find_by_name("eth");
	rte_devargs_reset(&devargs);
	return 0;

error:
	if (ret == -ENOTSUP)
		RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n",
				iter->bus->name);
	rte_devargs_reset(&devargs);
	free(bus_str);
	free(cls_str);
	return ret;
}

uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get next device from NULL iterator\n");
		return RTE_MAX_ETHPORTS;
	}

	if (iter->cls == NULL) /* invalid ethdev iterator */
		return RTE_MAX_ETHPORTS;

	do { /* loop to try all matching rte_device */
		/* If not pure ethdev filter and */
		if (iter->bus != NULL &&
				/* not in middle of rte_eth_dev iteration, */
				iter->class_device == NULL) {
			/* get next rte_device to try. */
			iter->device = iter->bus->dev_iterate(
					iter->device, iter->bus_str, iter);
			if (iter->device == NULL)
				break; /* no more rte_device candidate */
		}
		/* A device is matching bus part, need to check ethdev part. */
		iter->class_device = iter->cls->dev_iterate(
				iter->class_device, iter->cls_str, iter);
		if (iter->class_device != NULL)
			return eth_dev_to_id(iter->class_device); /* match */
	} while (iter->bus != NULL); /* need to try next rte_device */

	/* No more ethdev port to iterate. */
	rte_eth_iterator_cleanup(iter);
	return RTE_MAX_ETHPORTS;
}

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot do clean up from NULL iterator\n");
		return;
	}

	if (iter->bus_str == NULL)
		return; /* nothing to free in pure class filter */
	free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
	free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
	memset(iter, 0, sizeof(*iter));
}

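/*
 * Illustrative sketch (not upstream code): typical application use of the
 * iterator trio above. The devargs string is a made-up example. When the
 * loop runs to exhaustion, rte_eth_iterator_next() performs the cleanup
 * itself; on early exit, rte_eth_iterator_cleanup() must be called.
 *
 *	struct rte_dev_iterator iter;
 *	uint16_t id;
 *
 *	if (rte_eth_iterator_init(&iter, "class=eth,mac=00:11:22:33:44:55") == 0) {
 *		for (id = rte_eth_iterator_next(&iter);
 *		     id != RTE_MAX_ETHPORTS;
 *		     id = rte_eth_iterator_next(&iter))
 *			printf("matched port %u\n", id);
 *	}
 */
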
uint16_t
rte_eth_find_next(uint16_t port_id)
{
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
		port_id++;

	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;

	return port_id;
}

/*
 * Macro to iterate over all valid ports for internal usage.
 * Note: RTE_ETH_FOREACH_DEV differs in that it filters out owned ports.
 */
#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
	for (port_id = rte_eth_find_next(0); \
	     port_id < RTE_MAX_ETHPORTS; \
	     port_id = rte_eth_find_next(port_id + 1))

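/*
 * Usage sketch (illustrative): walking every allocated port, owned or
 * not, with the macro above:
 *
 *	uint16_t pid;
 *	uint16_t count = 0;
 *
 *	RTE_ETH_FOREACH_VALID_DEV(pid)
 *		count++;
 *
 * This is exactly how rte_eth_dev_count_total() below is implemented.
 */
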
uint16_t
rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].device != parent)
		port_id = rte_eth_find_next(port_id + 1);

	return port_id;
}

uint16_t
rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
	return rte_eth_find_next_of(port_id,
			rte_eth_devices[ref_port_id].device);
}

static void
eth_dev_shared_data_prepare(void)
{
	const unsigned int flags = 0;
	const struct rte_memzone *mz;

	rte_spinlock_lock(&eth_dev_shared_data_lock);

	if (eth_dev_shared_data == NULL) {
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			/* Allocate port data and ownership shared memory. */
			mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
					sizeof(*eth_dev_shared_data),
					rte_socket_id(), flags);
		} else
			mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
		if (mz == NULL)
			rte_panic("Cannot allocate ethdev shared data\n");

		eth_dev_shared_data = mz->addr;
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			eth_dev_shared_data->next_owner_id =
					RTE_ETH_DEV_NO_OWNER + 1;
			rte_spinlock_init(&eth_dev_shared_data->ownership_lock);
			memset(eth_dev_shared_data->data, 0,
			       sizeof(eth_dev_shared_data->data));
		}
	}

	rte_spinlock_unlock(&eth_dev_shared_data_lock);
}

static bool
eth_dev_is_allocated(const struct rte_eth_dev *ethdev)
{
	return ethdev->data->name[0] != '\0';
}

static struct rte_eth_dev *
eth_dev_allocated(const char *name)
{
	uint16_t i;

	RTE_BUILD_BUG_ON(RTE_MAX_ETHPORTS >= UINT16_MAX);

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (rte_eth_devices[i].data != NULL &&
		    strcmp(rte_eth_devices[i].data->name, name) == 0)
			return &rte_eth_devices[i];
	}
	return NULL;
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
	struct rte_eth_dev *ethdev;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ethdev = eth_dev_allocated(name);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return ethdev;
}

static uint16_t
eth_dev_find_free_port(void)
{
	uint16_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		/* Using shared name field to find a free port. */
		if (eth_dev_shared_data->data[i].name[0] == '\0') {
			RTE_ASSERT(rte_eth_devices[i].state ==
				   RTE_ETH_DEV_UNUSED);
			return i;
		}
	}
	return RTE_MAX_ETHPORTS;
}

static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
{
	struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

	eth_dev->data = &eth_dev_shared_data->data[port_id];

	return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
	uint16_t port_id;
	struct rte_eth_dev *eth_dev = NULL;
	size_t name_len;

	name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
	if (name_len == 0) {
		RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n");
		return NULL;
	}

	if (name_len >= RTE_ETH_NAME_MAX_LEN) {
		RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n");
		return NULL;
	}

	eth_dev_shared_data_prepare();

	/* Synchronize port creation between primary and secondary processes. */
	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	if (eth_dev_allocated(name) != NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Ethernet device with name %s already allocated\n",
			name);
		goto unlock;
	}

	port_id = eth_dev_find_free_port();
	if (port_id == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG(ERR,
			"Reached maximum number of Ethernet ports\n");
		goto unlock;
	}

	eth_dev = eth_dev_get(port_id);
	strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
	eth_dev->data->port_id = port_id;
	eth_dev->data->backer_port_id = RTE_MAX_ETHPORTS;
	eth_dev->data->mtu = RTE_ETHER_MTU;
	pthread_mutex_init(&eth_dev->data->flow_ops_mutex, NULL);

unlock:
	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return eth_dev;
}

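/*
 * Illustrative sketch (not part of the upstream file): a PMD typically
 * pairs rte_eth_dev_allocate() with rte_eth_dev_release_port() (defined
 * below) on its probe error path. The device name and the failure
 * condition are made up for the example.
 *
 *	struct rte_eth_dev *eth_dev;
 *
 *	eth_dev = rte_eth_dev_allocate("net_example0");
 *	if (eth_dev == NULL)
 *		return -ENOMEM;
 *	if (private_setup_failed)
 *		return rte_eth_dev_release_port(eth_dev);
 */
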
/*
 * Attach to a port already registered by the primary process, which
 * ensures that the same device gets the same port ID in both the
 * primary and secondary process.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
	uint16_t i;
	struct rte_eth_dev *eth_dev = NULL;

	eth_dev_shared_data_prepare();

	/* Synchronize port attachment to primary port creation and release. */
	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (strcmp(eth_dev_shared_data->data[i].name, name) == 0)
			break;
	}
	if (i == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG(ERR,
			"Device %s is not driven by the primary process\n",
			name);
	} else {
		eth_dev = eth_dev_get(i);
		RTE_ASSERT(eth_dev->data->port_id == i);
	}

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return eth_dev;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
	if (eth_dev == NULL)
		return -EINVAL;

	eth_dev_shared_data_prepare();

	if (eth_dev->state != RTE_ETH_DEV_UNUSED)
		rte_eth_dev_callback_process(eth_dev,
				RTE_ETH_EVENT_DESTROY, NULL);

	eth_dev_fp_ops_reset(rte_eth_fp_ops + eth_dev->data->port_id);

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	eth_dev->state = RTE_ETH_DEV_UNUSED;
	eth_dev->device = NULL;
	eth_dev->process_private = NULL;
	eth_dev->intr_handle = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->tx_pkt_prepare = NULL;
	eth_dev->rx_queue_count = NULL;
	eth_dev->rx_descriptor_status = NULL;
	eth_dev->tx_descriptor_status = NULL;
	eth_dev->dev_ops = NULL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_free(eth_dev->data->rx_queues);
		rte_free(eth_dev->data->tx_queues);
		rte_free(eth_dev->data->mac_addrs);
		rte_free(eth_dev->data->hash_mac_addrs);
		rte_free(eth_dev->data->dev_private);
		pthread_mutex_destroy(&eth_dev->data->flow_ops_mutex);
		memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
	}

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return 0;
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
	if (port_id >= RTE_MAX_ETHPORTS ||
	    (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
		return 0;
	else
		return 1;
}

static int
eth_is_valid_owner_id(uint64_t owner_id)
{
	if (owner_id == RTE_ETH_DEV_NO_OWNER ||
	    eth_dev_shared_data->next_owner_id <= owner_id)
		return 0;
	return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].data->owner.id != owner_id)
		port_id = rte_eth_find_next(port_id + 1);

	return port_id;
}

int
rte_eth_dev_owner_new(uint64_t *owner_id)
{
	if (owner_id == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get new owner ID to NULL\n");
		return -EINVAL;
	}

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	*owner_id = eth_dev_shared_data->next_owner_id++;

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return 0;
}

static int
eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
		       const struct rte_eth_dev_owner *new_owner)
{
	struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
	struct rte_eth_dev_owner *port_owner;

	if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (new_owner == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set ethdev port %u owner from NULL owner\n",
			port_id);
		return -EINVAL;
	}

	if (!eth_is_valid_owner_id(new_owner->id) &&
	    !eth_is_valid_owner_id(old_owner_id)) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
		       old_owner_id, new_owner->id);
		return -EINVAL;
	}

	port_owner = &rte_eth_devices[port_id].data->owner;
	if (port_owner->id != old_owner_id) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
			port_id, port_owner->name, port_owner->id);
		return -EPERM;
	}
	/* cannot truncate (both fields have the same size) */
	strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);

	port_owner->id = new_owner->id;

	RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
		port_id, new_owner->name, new_owner->id);

	return 0;
}

int
rte_eth_dev_owner_set(const uint16_t port_id,
		      const struct rte_eth_dev_owner *owner)
{
	int ret;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return ret;
}

int
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
	const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
			{.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
	int ret;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ret = eth_dev_owner_set(port_id, owner_id, &new_owner);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return ret;
}

int
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
	uint16_t port_id;
	int ret = 0;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	if (eth_is_valid_owner_id(owner_id)) {
		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
			if (rte_eth_devices[port_id].data->owner.id == owner_id)
				memset(&rte_eth_devices[port_id].data->owner, 0,
				       sizeof(struct rte_eth_dev_owner));
		RTE_ETHDEV_LOG(NOTICE,
			"All port owners owned by %016"PRIx64" identifier have been removed\n",
			owner_id);
	} else {
		RTE_ETHDEV_LOG(ERR,
			       "Invalid owner ID=%016"PRIx64"\n",
			       owner_id);
		ret = -EINVAL;
	}

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return ret;
}

int
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
	struct rte_eth_dev *ethdev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ethdev = &rte_eth_devices[port_id];

	if (!eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (owner == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u owner to NULL\n",
			port_id);
		return -EINVAL;
	}

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
	rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return 0;
}

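/*
 * Illustrative sketch (not upstream code): the intended ownership
 * workflow using the API above; error handling omitted for brevity.
 *
 *	struct rte_eth_dev_owner owner = { .name = "my_app" };
 *
 *	rte_eth_dev_owner_new(&owner.id);
 *	rte_eth_dev_owner_set(port_id, &owner);
 *	... exclusive use of the port ...
 *	rte_eth_dev_owner_unset(port_id, owner.id);
 */
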
int
rte_eth_dev_socket_id(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
	return rte_eth_devices[port_id].data->numa_node;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
	return rte_eth_devices[port_id].security_ctx;
}

uint16_t
rte_eth_dev_count_avail(void)
{
	uint16_t p;
	uint16_t count;

	count = 0;

	RTE_ETH_FOREACH_DEV(p)
		count++;

	return count;
}

uint16_t
rte_eth_dev_count_total(void)
{
	uint16_t port, count = 0;

	RTE_ETH_FOREACH_VALID_DEV(port)
		count++;

	return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
	char *tmp;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u name to NULL\n",
			port_id);
		return -EINVAL;
	}

	/* Don't check 'rte_eth_devices[i].data' here,
	 * because it might be overwritten by a VDEV PMD */
	tmp = eth_dev_shared_data->data[port_id].name;
	strcpy(name, tmp);
	return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
	uint16_t pid;

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get port ID from NULL name\n");
		return -EINVAL;
	}

	if (port_id == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get port ID to NULL for %s\n", name);
		return -EINVAL;
	}

	RTE_ETH_FOREACH_VALID_DEV(pid)
		if (!strcmp(name, eth_dev_shared_data->data[pid].name)) {
			*port_id = pid;
			return 0;
		}

	return -ENODEV;
}

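/*
 * Illustrative sketch (not upstream code): the two lookups above are
 * inverses of each other for any valid port:
 *
 *	char name[RTE_ETH_NAME_MAX_LEN];
 *	uint16_t pid;
 *
 *	if (rte_eth_dev_get_name_by_port(port_id, name) == 0 &&
 *	    rte_eth_dev_get_port_by_name(name, &pid) == 0)
 *		RTE_ASSERT(pid == port_id);
 */
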
static int
eth_err(uint16_t port_id, int ret)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return -EIO;
	return ret;
}

static void
eth_dev_rxq_release(struct rte_eth_dev *dev, uint16_t qid)
{
	void **rxq = dev->data->rx_queues;

	if (rxq[qid] == NULL)
		return;

	if (dev->dev_ops->rx_queue_release != NULL)
		(*dev->dev_ops->rx_queue_release)(dev, qid);
	rxq[qid] = NULL;
}

static void
eth_dev_txq_release(struct rte_eth_dev *dev, uint16_t qid)
{
	void **txq = dev->data->tx_queues;

	if (txq[qid] == NULL)
		return;

	if (dev->dev_ops->tx_queue_release != NULL)
		(*dev->dev_ops->tx_queue_release)(dev, qid);
	txq[qid] = NULL;
}

static int
eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_rx_queues;
	unsigned int i;

	if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
				sizeof(dev->data->rx_queues[0]) *
				RTE_MAX_QUEUES_PER_PORT,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->rx_queues == NULL) {
			dev->data->nb_rx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_rxq_release(dev, i);

	} else if (dev->data->rx_queues != NULL && nb_queues == 0) {
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_rxq_release(dev, i);

		rte_free(dev->data->rx_queues);
		dev->data->rx_queues = NULL;
	}
	dev->data->nb_rx_queues = nb_queues;
	return 0;
}

static int
eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	uint16_t port_id;

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Invalid Rx queue_id=%u of device with port_id=%u\n",
			       rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queues[rx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Queue %u of device with port_id=%u has not been setup\n",
			       rx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

static int
eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	uint16_t port_id;

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Invalid Tx queue_id=%u of device with port_id=%u\n",
			       tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queues[tx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Queue %u of device with port_id=%u has not been setup\n",
			       tx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_start(dev, rx_queue_id));
}

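/*
 * Illustrative sketch (not upstream code): per-queue start is typically
 * combined with deferred start, i.e. the queue is marked with
 * rx_deferred_start = 1 at setup time and started explicitly once the
 * port is up. nb_rxd, socket_id and mp are assumed to be set up
 * elsewhere.
 *
 *	struct rte_eth_rxconf rxconf = dev_info.default_rxconf;
 *
 *	rxconf.rx_deferred_start = 1;
 *	rte_eth_rx_queue_setup(port_id, 0, nb_rxd, socket_id, &rxconf, mp);
 *	rte_eth_dev_start(port_id);
 *	rte_eth_dev_rx_queue_start(port_id, 0);
 */
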
int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
}

int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
}

static int
eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_tx_queues;
	unsigned int i;

	if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
				sizeof(dev->data->tx_queues[0]) *
				RTE_MAX_QUEUES_PER_PORT,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->tx_queues == NULL) {
			dev->data->nb_tx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_txq_release(dev, i);

	} else if (dev->data->tx_queues != NULL && nb_queues == 0) {
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_txq_release(dev, i);

		rte_free(dev->data->tx_queues);
		dev->data->tx_queues = NULL;
	}
	dev->data->nb_tx_queues = nb_queues;
	return 0;
}

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
	switch (speed) {
	case RTE_ETH_SPEED_NUM_10M:
		return duplex ? RTE_ETH_LINK_SPEED_10M : RTE_ETH_LINK_SPEED_10M_HD;
	case RTE_ETH_SPEED_NUM_100M:
		return duplex ? RTE_ETH_LINK_SPEED_100M : RTE_ETH_LINK_SPEED_100M_HD;
	case RTE_ETH_SPEED_NUM_1G:
		return RTE_ETH_LINK_SPEED_1G;
	case RTE_ETH_SPEED_NUM_2_5G:
		return RTE_ETH_LINK_SPEED_2_5G;
	case RTE_ETH_SPEED_NUM_5G:
		return RTE_ETH_LINK_SPEED_5G;
	case RTE_ETH_SPEED_NUM_10G:
		return RTE_ETH_LINK_SPEED_10G;
	case RTE_ETH_SPEED_NUM_20G:
		return RTE_ETH_LINK_SPEED_20G;
	case RTE_ETH_SPEED_NUM_25G:
		return RTE_ETH_LINK_SPEED_25G;
	case RTE_ETH_SPEED_NUM_40G:
		return RTE_ETH_LINK_SPEED_40G;
	case RTE_ETH_SPEED_NUM_50G:
		return RTE_ETH_LINK_SPEED_50G;
	case RTE_ETH_SPEED_NUM_56G:
		return RTE_ETH_LINK_SPEED_56G;
	case RTE_ETH_SPEED_NUM_100G:
		return RTE_ETH_LINK_SPEED_100G;
	case RTE_ETH_SPEED_NUM_200G:
		return RTE_ETH_LINK_SPEED_200G;
	default:
		return 0;
	}
}

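/*
 * Illustrative sketch (not upstream code): building a link_speeds mask
 * for rte_eth_conf from a numeric speed with the helper above.
 *
 *	struct rte_eth_conf conf = { 0 };
 *
 *	conf.link_speeds = RTE_ETH_LINK_SPEED_FIXED |
 *		rte_eth_speed_bitflag(RTE_ETH_SPEED_NUM_10G,
 *				RTE_ETH_LINK_FULL_DUPLEX);
 */
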
const char *
rte_eth_dev_rx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i) {
		if (offload == eth_dev_rx_offload_names[i].offload) {
			name = eth_dev_rx_offload_names[i].name;
			break;
		}
	}

	return name;
}

const char *
rte_eth_dev_tx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(eth_dev_tx_offload_names); ++i) {
		if (offload == eth_dev_tx_offload_names[i].offload) {
			name = eth_dev_tx_offload_names[i].name;
			break;
		}
	}

	return name;
}

const char *
rte_eth_dev_capability_name(uint64_t capability)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(rte_eth_dev_capa_names); ++i) {
		if (capability == rte_eth_dev_capa_names[i].offload) {
			name = rte_eth_dev_capa_names[i].name;
			break;
		}
	}

	return name;
}

static inline int
eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
		   uint32_t max_rx_pkt_len, uint32_t dev_info_size)
{
	int ret = 0;

	if (dev_info_size == 0) {
		if (config_size != max_rx_pkt_len) {
			RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
				       " %u != %u is not allowed\n",
				       port_id, config_size, max_rx_pkt_len);
			ret = -EINVAL;
		}
	} else if (config_size > dev_info_size) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			       "> max allowed value %u\n", port_id, config_size,
			       dev_info_size);
		ret = -EINVAL;
	} else if (config_size < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			       "< min allowed value %u\n", port_id, config_size,
			       (unsigned int)RTE_ETHER_MIN_LEN);
		ret = -EINVAL;
	}
	return ret;
}

/*
 * Validate offloads that are requested through rte_eth_dev_configure against
 * the offloads successfully set by the Ethernet device.
 *
 * @param port_id
 *   The port identifier of the Ethernet device.
 * @param req_offloads
 *   The offloads that have been requested through `rte_eth_dev_configure`.
 * @param set_offloads
 *   The offloads successfully set by the Ethernet device.
 * @param offload_type
 *   The offload type i.e. Rx/Tx string.
 * @param offload_name
 *   The function that prints the offload name.
 * @return
 *   - (0) if validation successful.
 *   - (-EINVAL) if requested offload has been silently disabled.
 */
static int
eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads,
		  uint64_t set_offloads, const char *offload_type,
		  const char *(*offload_name)(uint64_t))
{
	uint64_t offloads_diff = req_offloads ^ set_offloads;
	uint64_t offload;
	int ret = 0;

	while (offloads_diff != 0) {
		/* Check if any offload is requested but not enabled. */
		offload = RTE_BIT64(__builtin_ctzll(offloads_diff));
		if (offload & req_offloads) {
			RTE_ETHDEV_LOG(ERR,
				"Port %u failed to enable %s offload %s\n",
				port_id, offload_type, offload_name(offload));
			ret = -EINVAL;
		}

		/* Check if offload couldn't be disabled. */
		if (offload & set_offloads) {
			RTE_ETHDEV_LOG(DEBUG,
				"Port %u %s offload %s is not requested but enabled\n",
				port_id, offload_type, offload_name(offload));
		}

		offloads_diff &= ~offload;
	}

	return ret;
}

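/*
 * Worked example (for illustration): if the application requested
 * RSS_HASH | IPV4_CKSUM but the driver kept only IPV4_CKSUM, then
 * offloads_diff has just the RSS_HASH bit set. That bit is also set in
 * req_offloads, so the failure is logged and -EINVAL returned. A bit set
 * only in set_offloads (enabled although not requested) is merely
 * reported at DEBUG level.
 */
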
static uint32_t
eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
{
	uint32_t overhead_len;

	if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
		overhead_len = max_rx_pktlen - max_mtu;
	else
		overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	return overhead_len;
}

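/*
 * Worked example (for illustration): a device reporting
 * max_rx_pktlen = 1518 and max_mtu = 1500 yields an overhead of
 * 1518 - 1500 = 18 bytes, i.e. RTE_ETHER_HDR_LEN (14) plus
 * RTE_ETHER_CRC_LEN (4). The same sum is used as fallback when the
 * driver does not report a usable max_mtu.
 */
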
/* rte_eth_dev_info_get() should be called prior to this function */
static int
eth_dev_validate_mtu(uint16_t port_id, struct rte_eth_dev_info *dev_info,
		uint16_t mtu)
{
	uint32_t overhead_len;
	uint32_t frame_size;

	if (mtu < dev_info->min_mtu) {
		RTE_ETHDEV_LOG(ERR,
			"MTU (%u) < device min MTU (%u) for port_id %u\n",
			mtu, dev_info->min_mtu, port_id);
		return -EINVAL;
	}
	if (mtu > dev_info->max_mtu) {
		RTE_ETHDEV_LOG(ERR,
			"MTU (%u) > device max MTU (%u) for port_id %u\n",
			mtu, dev_info->max_mtu, port_id);
		return -EINVAL;
	}

	overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen,
			dev_info->max_mtu);
	frame_size = mtu + overhead_len;
	if (frame_size < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG(ERR,
			"Frame size (%u) < min frame size (%u) for port_id %u\n",
			frame_size, RTE_ETHER_MIN_LEN, port_id);
		return -EINVAL;
	}

	if (frame_size > dev_info->max_rx_pktlen) {
		RTE_ETHDEV_LOG(ERR,
			"Frame size (%u) > device max frame size (%u) for port_id %u\n",
			frame_size, dev_info->max_rx_pktlen, port_id);
		return -EINVAL;
	}

	return 0;
}

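/*
 * Worked example (for illustration): with an MTU of 1500 and the
 * 18-byte overhead computed above, the resulting 1518-byte frame must
 * fall within [RTE_ETHER_MIN_LEN (64), dev_info->max_rx_pktlen].
 */
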
int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf orig_conf;
	int diag;
	int ret;
	uint16_t old_mtu;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev_conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot configure ethdev port %u from NULL config\n",
			port_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be stopped to allow configuration\n",
			port_id);
		return -EBUSY;
	}

	/*
	 * Ensure that "dev_configured" is always 0 each time we prepare to
	 * run dev_configure(), to avoid any unanticipated behaviour.
	 * It is set to 1 only when dev_configure() completes successfully.
	 */
	dev->data->dev_configured = 0;

	/* Store the original config, as a rollback is required on failure */
	memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));

	/*
	 * Copy the dev_conf parameter into the dev structure.
	 * rte_eth_dev_info_get() requires dev_conf, so copy it before the
	 * dev_info_get() call.
	 */
	if (dev_conf != &dev->data->dev_conf)
		memcpy(&dev->data->dev_conf, dev_conf,
		       sizeof(dev->data->dev_conf));

	/* Back up the MTU for rollback */
	old_mtu = dev->data->mtu;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		goto rollback;

	/* If number of queues specified by application for both Rx and Tx is
	 * zero, use driver preferred values. This cannot be done individually
	 * as it is valid for either Tx or Rx (but not both) to be zero.
	 * If the driver does not provide any preferred values, fall back on
	 * EAL defaults.
	 */
1454         if (nb_rx_q == 0 && nb_tx_q == 0) {
1455                 nb_rx_q = dev_info.default_rxportconf.nb_queues;
1456                 if (nb_rx_q == 0)
1457                         nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
1458                 nb_tx_q = dev_info.default_txportconf.nb_queues;
1459                 if (nb_tx_q == 0)
1460                         nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
1461         }
1462
1463         if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
1464                 RTE_ETHDEV_LOG(ERR,
1465                         "Number of Rx queues requested (%u) is greater than max supported(%d)\n",
1466                         nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
1467                 ret = -EINVAL;
1468                 goto rollback;
1469         }
1470
1471         if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
1472                 RTE_ETHDEV_LOG(ERR,
1473                         "Number of Tx queues requested (%u) is greater than max supported(%d)\n",
1474                         nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
1475                 ret = -EINVAL;
1476                 goto rollback;
1477         }
1478
1479         /*
1480          * Check that the numbers of Rx and Tx queues are not greater
1481          * than the maximum number of Rx and Tx queues supported by the
1482          * configured device.
1483          */
1484         if (nb_rx_q > dev_info.max_rx_queues) {
1485                 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
1486                         port_id, nb_rx_q, dev_info.max_rx_queues);
1487                 ret = -EINVAL;
1488                 goto rollback;
1489         }
1490
1491         if (nb_tx_q > dev_info.max_tx_queues) {
1492                 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
1493                         port_id, nb_tx_q, dev_info.max_tx_queues);
1494                 ret = -EINVAL;
1495                 goto rollback;
1496         }
1497
1498         /* Check that the device supports requested interrupts */
1499         if ((dev_conf->intr_conf.lsc == 1) &&
1500                         (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
1501                 RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
1502                         dev->device->driver->name);
1503                 ret = -EINVAL;
1504                 goto rollback;
1505         }
1506         if ((dev_conf->intr_conf.rmv == 1) &&
1507                         (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
1508                 RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
1509                         dev->device->driver->name);
1510                 ret = -EINVAL;
1511                 goto rollback;
1512         }
1513
1514         if (dev_conf->rxmode.mtu == 0)
1515                 dev->data->dev_conf.rxmode.mtu = RTE_ETHER_MTU;
1516
1517         ret = eth_dev_validate_mtu(port_id, &dev_info,
1518                         dev->data->dev_conf.rxmode.mtu);
1519         if (ret != 0)
1520                 goto rollback;
1521
1522         dev->data->mtu = dev->data->dev_conf.rxmode.mtu;
1523
1524         /*
1525          * If LRO is enabled, check that the maximum aggregated packet
1526          * size is supported by the configured device.
1527          */
1528         if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
1529                 uint32_t max_rx_pktlen;
1530                 uint32_t overhead_len;
1531
1532                 overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
1533                                 dev_info.max_mtu);
1534                 max_rx_pktlen = dev->data->dev_conf.rxmode.mtu + overhead_len;
1535                 if (dev_conf->rxmode.max_lro_pkt_size == 0)
1536                         dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
1537                 ret = eth_dev_check_lro_pkt_size(port_id,
1538                                 dev->data->dev_conf.rxmode.max_lro_pkt_size,
1539                                 max_rx_pktlen,
1540                                 dev_info.max_lro_pkt_size);
1541                 if (ret != 0)
1542                         goto rollback;
1543         }
1544
1545         /* Any requested offloading must be within its device capabilities */
1546         if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
1547              dev_conf->rxmode.offloads) {
1548                 RTE_ETHDEV_LOG(ERR,
1549                         "Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
1550                         "capabilities 0x%"PRIx64" in %s()\n",
1551                         port_id, dev_conf->rxmode.offloads,
1552                         dev_info.rx_offload_capa,
1553                         __func__);
1554                 ret = -EINVAL;
1555                 goto rollback;
1556         }
1557         if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
1558              dev_conf->txmode.offloads) {
1559                 RTE_ETHDEV_LOG(ERR,
1560                         "Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
1561                         "capabilities 0x%"PRIx64" in %s()\n",
1562                         port_id, dev_conf->txmode.offloads,
1563                         dev_info.tx_offload_capa,
1564                         __func__);
1565                 ret = -EINVAL;
1566                 goto rollback;
1567         }
1568
1569         dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
1570                 rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);
1571
1572         /* Check that the device supports the requested RSS hash functions. */
1573         if ((dev_info.flow_type_rss_offloads |
1574              dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
1575             dev_info.flow_type_rss_offloads) {
1576                 RTE_ETHDEV_LOG(ERR,
1577                         "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
1578                         port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
1579                         dev_info.flow_type_rss_offloads);
1580                 ret = -EINVAL;
1581                 goto rollback;
1582         }
1583
1584         /* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
1585         if (((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) == 0) &&
1586             (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
1587                 RTE_ETHDEV_LOG(ERR,
1588                         "Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
1589                         port_id,
1590                         rte_eth_dev_rx_offload_name(RTE_ETH_RX_OFFLOAD_RSS_HASH));
1591                 ret = -EINVAL;
1592                 goto rollback;
1593         }
1594
1595         /*
1596          * Setup new number of Rx/Tx queues and reconfigure device.
1597          */
1598         diag = eth_dev_rx_queue_config(dev, nb_rx_q);
1599         if (diag != 0) {
1600                 RTE_ETHDEV_LOG(ERR,
1601                         "Port%u eth_dev_rx_queue_config = %d\n",
1602                         port_id, diag);
1603                 ret = diag;
1604                 goto rollback;
1605         }
1606
1607         diag = eth_dev_tx_queue_config(dev, nb_tx_q);
1608         if (diag != 0) {
1609                 RTE_ETHDEV_LOG(ERR,
1610                         "Port%u eth_dev_tx_queue_config = %d\n",
1611                         port_id, diag);
1612                 eth_dev_rx_queue_config(dev, 0);
1613                 ret = diag;
1614                 goto rollback;
1615         }
1616
1617         diag = (*dev->dev_ops->dev_configure)(dev);
1618         if (diag != 0) {
1619                 RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
1620                         port_id, diag);
1621                 ret = eth_err(port_id, diag);
1622                 goto reset_queues;
1623         }
1624
1625         /* Initialize Rx profiling if enabled at compilation time. */
1626         diag = __rte_eth_dev_profile_init(port_id, dev);
1627         if (diag != 0) {
1628                 RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
1629                         port_id, diag);
1630                 ret = eth_err(port_id, diag);
1631                 goto reset_queues;
1632         }
1633
1634         /* Validate Rx offloads. */
1635         diag = eth_dev_validate_offloads(port_id,
1636                         dev_conf->rxmode.offloads,
1637                         dev->data->dev_conf.rxmode.offloads, "Rx",
1638                         rte_eth_dev_rx_offload_name);
1639         if (diag != 0) {
1640                 ret = diag;
1641                 goto reset_queues;
1642         }
1643
1644         /* Validate Tx offloads. */
1645         diag = eth_dev_validate_offloads(port_id,
1646                         dev_conf->txmode.offloads,
1647                         dev->data->dev_conf.txmode.offloads, "Tx",
1648                         rte_eth_dev_tx_offload_name);
1649         if (diag != 0) {
1650                 ret = diag;
1651                 goto reset_queues;
1652         }
1653
1654         dev->data->dev_configured = 1;
1655         rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0);
1656         return 0;
1657 reset_queues:
1658         eth_dev_rx_queue_config(dev, 0);
1659         eth_dev_tx_queue_config(dev, 0);
1660 rollback:
1661         memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
1662         if (old_mtu != dev->data->mtu)
1663                 dev->data->mtu = old_mtu;
1664
1665         rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret);
1666         return ret;
1667 }
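
/*
 * Usage sketch (hypothetical application code, not part of this library):
 * a minimal call to rte_eth_dev_configure(); "port_id" and the 1/1 queue
 * counts are placeholder values for the example.
 *
 *     struct rte_eth_conf port_conf;
 *     int ret;
 *
 *     memset(&port_conf, 0, sizeof(port_conf));
 *     ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
 *     if (ret < 0)
 *         rte_exit(EXIT_FAILURE, "Cannot configure port %u\n", port_id);
 */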
1668
1669 void
1670 rte_eth_dev_internal_reset(struct rte_eth_dev *dev)
1671 {
1672         if (dev->data->dev_started) {
1673                 RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
1674                         dev->data->port_id);
1675                 return;
1676         }
1677
1678         eth_dev_rx_queue_config(dev, 0);
1679         eth_dev_tx_queue_config(dev, 0);
1680
1681         memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
1682 }
1683
1684 static void
1685 eth_dev_mac_restore(struct rte_eth_dev *dev,
1686                         struct rte_eth_dev_info *dev_info)
1687 {
1688         struct rte_ether_addr *addr;
1689         uint16_t i;
1690         uint32_t pool = 0;
1691         uint64_t pool_mask;
1692
1693         /* replay MAC address configuration including default MAC */
1694         addr = &dev->data->mac_addrs[0];
1695         if (*dev->dev_ops->mac_addr_set != NULL)
1696                 (*dev->dev_ops->mac_addr_set)(dev, addr);
1697         else if (*dev->dev_ops->mac_addr_add != NULL)
1698                 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);
1699
1700         if (*dev->dev_ops->mac_addr_add != NULL) {
1701                 for (i = 1; i < dev_info->max_mac_addrs; i++) {
1702                         addr = &dev->data->mac_addrs[i];
1703
1704                         /* skip zero address */
1705                         if (rte_is_zero_ether_addr(addr))
1706                                 continue;
1707
1708                         pool = 0;
1709                         pool_mask = dev->data->mac_pool_sel[i];
1710
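                        /*
                         * mac_pool_sel[i] is a bitmask of the pools this
                         * address was added to; replay the add on every pool
                         * whose bit is set.
                         */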
1711                         do {
1712                                 if (pool_mask & UINT64_C(1))
1713                                         (*dev->dev_ops->mac_addr_add)(dev,
1714                                                 addr, i, pool);
1715                                 pool_mask >>= 1;
1716                                 pool++;
1717                         } while (pool_mask);
1718                 }
1719         }
1720 }
1721
1722 static int
1723 eth_dev_config_restore(struct rte_eth_dev *dev,
1724                 struct rte_eth_dev_info *dev_info, uint16_t port_id)
1725 {
1726         int ret;
1727
1728         if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
1729                 eth_dev_mac_restore(dev, dev_info);
1730
1731         /* replay promiscuous configuration */
1732         /*
1733          * use the callbacks directly since we don't need the port_id check
1734          * and want to bypass the same-value short-circuit in the public API
1735          */
1736         if (rte_eth_promiscuous_get(port_id) == 1 &&
1737             *dev->dev_ops->promiscuous_enable != NULL) {
1738                 ret = eth_err(port_id,
1739                               (*dev->dev_ops->promiscuous_enable)(dev));
1740                 if (ret != 0 && ret != -ENOTSUP) {
1741                         RTE_ETHDEV_LOG(ERR,
1742                                 "Failed to enable promiscuous mode for device (port %u): %s\n",
1743                                 port_id, rte_strerror(-ret));
1744                         return ret;
1745                 }
1746         } else if (rte_eth_promiscuous_get(port_id) == 0 &&
1747                    *dev->dev_ops->promiscuous_disable != NULL) {
1748                 ret = eth_err(port_id,
1749                               (*dev->dev_ops->promiscuous_disable)(dev));
1750                 if (ret != 0 && ret != -ENOTSUP) {
1751                         RTE_ETHDEV_LOG(ERR,
1752                                 "Failed to disable promiscuous mode for device (port %u): %s\n",
1753                                 port_id, rte_strerror(-ret));
1754                         return ret;
1755                 }
1756         }
1757
1758         /* replay all multicast configuration */
1759         /*
1760          * use the callbacks directly since we don't need the port_id check
1761          * and want to bypass the same-value short-circuit in the public API
1762          */
1763         if (rte_eth_allmulticast_get(port_id) == 1 &&
1764             *dev->dev_ops->allmulticast_enable != NULL) {
1765                 ret = eth_err(port_id,
1766                               (*dev->dev_ops->allmulticast_enable)(dev));
1767                 if (ret != 0 && ret != -ENOTSUP) {
1768                         RTE_ETHDEV_LOG(ERR,
1769                                 "Failed to enable allmulticast mode for device (port %u): %s\n",
1770                                 port_id, rte_strerror(-ret));
1771                         return ret;
1772                 }
1773         } else if (rte_eth_allmulticast_get(port_id) == 0 &&
1774                    *dev->dev_ops->allmulticast_disable != NULL) {
1775                 ret = eth_err(port_id,
1776                               (*dev->dev_ops->allmulticast_disable)(dev));
1777                 if (ret != 0 && ret != -ENOTSUP) {
1778                         RTE_ETHDEV_LOG(ERR,
1779                                 "Failed to disable allmulticast mode for device (port %u): %s\n",
1780                                 port_id, rte_strerror(-ret));
1781                         return ret;
1782                 }
1783         }
1784
1785         return 0;
1786 }
1787
1788 int
1789 rte_eth_dev_start(uint16_t port_id)
1790 {
1791         struct rte_eth_dev *dev;
1792         struct rte_eth_dev_info dev_info;
1793         int diag;
1794         int ret, ret_stop;
1795
1796         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1797         dev = &rte_eth_devices[port_id];
1798
1799         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1800
1801         if (dev->data->dev_configured == 0) {
1802                 RTE_ETHDEV_LOG(INFO,
1803                         "Device with port_id=%"PRIu16" is not configured.\n",
1804                         port_id);
1805                 return -EINVAL;
1806         }
1807
1808         if (dev->data->dev_started != 0) {
1809                 RTE_ETHDEV_LOG(INFO,
1810                         "Device with port_id=%"PRIu16" already started\n",
1811                         port_id);
1812                 return 0;
1813         }
1814
1815         ret = rte_eth_dev_info_get(port_id, &dev_info);
1816         if (ret != 0)
1817                 return ret;
1818
1819         /* Restore MAC address now if the device does not support live change */
1820         if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
1821                 eth_dev_mac_restore(dev, &dev_info);
1822
1823         diag = (*dev->dev_ops->dev_start)(dev);
1824         if (diag == 0)
1825                 dev->data->dev_started = 1;
1826         else
1827                 return eth_err(port_id, diag);
1828
1829         ret = eth_dev_config_restore(dev, &dev_info, port_id);
1830         if (ret != 0) {
1831                 RTE_ETHDEV_LOG(ERR,
1832                         "Error during restoring configuration for device (port %u): %s\n",
1833                         port_id, rte_strerror(-ret));
1834                 ret_stop = rte_eth_dev_stop(port_id);
1835                 if (ret_stop != 0) {
1836                         RTE_ETHDEV_LOG(ERR,
1837                                 "Failed to stop device (port %u): %s\n",
1838                                 port_id, rte_strerror(-ret_stop));
1839                 }
1840
1841                 return ret;
1842         }
1843
1844         if (dev->data->dev_conf.intr_conf.lsc == 0) {
1845                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
1846                 (*dev->dev_ops->link_update)(dev, 0);
1847         }
1848
1849         /* expose selection of PMD fast-path functions */
1850         eth_dev_fp_ops_setup(rte_eth_fp_ops + port_id, dev);
1851
1852         rte_ethdev_trace_start(port_id);
1853         return 0;
1854 }
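
/*
 * Usage sketch (hypothetical application code): a start/stop sequence,
 * assuming the port has already been configured and its queues set up.
 *
 *     ret = rte_eth_dev_start(port_id);
 *     if (ret < 0)
 *         rte_exit(EXIT_FAILURE, "Cannot start port %u\n", port_id);
 *     ... run Rx/Tx on the port ...
 *     ret = rte_eth_dev_stop(port_id);
 *     if (ret != 0)
 *         printf("Failed to stop port %u: %s\n", port_id, rte_strerror(-ret));
 */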
1855
1856 int
1857 rte_eth_dev_stop(uint16_t port_id)
1858 {
1859         struct rte_eth_dev *dev;
1860         int ret;
1861
1862         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1863         dev = &rte_eth_devices[port_id];
1864
1865         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_stop, -ENOTSUP);
1866
1867         if (dev->data->dev_started == 0) {
1868                 RTE_ETHDEV_LOG(INFO,
1869                         "Device with port_id=%"PRIu16" already stopped\n",
1870                         port_id);
1871                 return 0;
1872         }
1873
1874         /* point fast-path functions to dummy ones */
1875         eth_dev_fp_ops_reset(rte_eth_fp_ops + port_id);
1876
1877         dev->data->dev_started = 0;
1878         ret = (*dev->dev_ops->dev_stop)(dev);
1879         rte_ethdev_trace_stop(port_id, ret);
1880
1881         return ret;
1882 }
1883
1884 int
1885 rte_eth_dev_set_link_up(uint16_t port_id)
1886 {
1887         struct rte_eth_dev *dev;
1888
1889         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1890         dev = &rte_eth_devices[port_id];
1891
1892         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1893         return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
1894 }
1895
1896 int
1897 rte_eth_dev_set_link_down(uint16_t port_id)
1898 {
1899         struct rte_eth_dev *dev;
1900
1901         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1902         dev = &rte_eth_devices[port_id];
1903
1904         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1905         return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
1906 }
1907
1908 int
1909 rte_eth_dev_close(uint16_t port_id)
1910 {
1911         struct rte_eth_dev *dev;
1912         int firsterr, binerr;
1913         int *lasterr = &firsterr;
1914
1915         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1916         dev = &rte_eth_devices[port_id];
1917
1918         if (dev->data->dev_started) {
1919                 RTE_ETHDEV_LOG(ERR, "Cannot close started device (port %u)\n",
1920                                port_id);
1921                 return -EINVAL;
1922         }
1923
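        /*
         * Error-chaining idiom: "lasterr" points at "firsterr" until an
         * error is recorded there, then it is redirected to the scratch
         * variable "binerr" so later failures cannot overwrite the first.
         */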
1924         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
1925         *lasterr = (*dev->dev_ops->dev_close)(dev);
1926         if (*lasterr != 0)
1927                 lasterr = &binerr;
1928
1929         rte_ethdev_trace_close(port_id);
1930         *lasterr = rte_eth_dev_release_port(dev);
1931
1932         return firsterr;
1933 }
1934
1935 int
1936 rte_eth_dev_reset(uint16_t port_id)
1937 {
1938         struct rte_eth_dev *dev;
1939         int ret;
1940
1941         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1942         dev = &rte_eth_devices[port_id];
1943
1944         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
1945
1946         ret = rte_eth_dev_stop(port_id);
1947         if (ret != 0) {
1948                 RTE_ETHDEV_LOG(ERR,
1949                         "Failed to stop device (port %u) before reset: %s - ignore\n",
1950                         port_id, rte_strerror(-ret));
1951         }
1952         ret = dev->dev_ops->dev_reset(dev);
1953
1954         return eth_err(port_id, ret);
1955 }
1956
1957 int
1958 rte_eth_dev_is_removed(uint16_t port_id)
1959 {
1960         struct rte_eth_dev *dev;
1961         int ret;
1962
1963         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
1964         dev = &rte_eth_devices[port_id];
1965
1966         if (dev->state == RTE_ETH_DEV_REMOVED)
1967                 return 1;
1968
1969         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);
1970
1971         ret = dev->dev_ops->is_removed(dev);
1972         if (ret != 0)
1973                 /* Device is physically removed. */
1974                 dev->state = RTE_ETH_DEV_REMOVED;
1975
1976         return ret;
1977 }
1978
1979 static int
1980 rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg,
1981                              uint16_t n_seg, uint32_t *mbp_buf_size,
1982                              const struct rte_eth_dev_info *dev_info)
1983 {
1984         const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa;
1985         struct rte_mempool *mp_first;
1986         uint32_t offset_mask;
1987         uint16_t seg_idx;
1988
1989         if (n_seg > seg_capa->max_nseg) {
1990                 RTE_ETHDEV_LOG(ERR,
1991                                "Requested Rx segments %u exceed supported %u\n",
1992                                n_seg, seg_capa->max_nseg);
1993                 return -EINVAL;
1994         }
1995         /*
1996          * Check the sizes and offsets against buffer sizes
1997          * for each segment specified in extended configuration.
1998          */
1999         mp_first = rx_seg[0].mp;
2000         offset_mask = RTE_BIT32(seg_capa->offset_align_log2) - 1;
2001         for (seg_idx = 0; seg_idx < n_seg; seg_idx++) {
2002                 struct rte_mempool *mpl = rx_seg[seg_idx].mp;
2003                 uint32_t length = rx_seg[seg_idx].length;
2004                 uint32_t offset = rx_seg[seg_idx].offset;
2005
2006                 if (mpl == NULL) {
2007                         RTE_ETHDEV_LOG(ERR, "null mempool pointer\n");
2008                         return -EINVAL;
2009                 }
2010                 if (seg_idx != 0 && mp_first != mpl &&
2011                     seg_capa->multi_pools == 0) {
2012                         RTE_ETHDEV_LOG(ERR, "Receiving to multiple pools is not supported\n");
2013                         return -ENOTSUP;
2014                 }
2015                 if (offset != 0) {
2016                         if (seg_capa->offset_allowed == 0) {
2017                                 RTE_ETHDEV_LOG(ERR, "Rx segmentation with offset is not supported\n");
2018                                 return -ENOTSUP;
2019                         }
2020                         if (offset & offset_mask) {
2021                                 RTE_ETHDEV_LOG(ERR, "Rx segmentation invalid offset alignment %u, %u\n",
2022                                                offset,
2023                                                seg_capa->offset_align_log2);
2024                                 return -EINVAL;
2025                         }
2026                 }
2027                 if (mpl->private_data_size <
2028                         sizeof(struct rte_pktmbuf_pool_private)) {
2029                         RTE_ETHDEV_LOG(ERR,
2030                                        "%s private_data_size %u < %u\n",
2031                                        mpl->name, mpl->private_data_size,
2032                                        (unsigned int)sizeof
2033                                         (struct rte_pktmbuf_pool_private));
2034                         return -ENOSPC;
2035                 }
2036                 offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM;
2037                 *mbp_buf_size = rte_pktmbuf_data_room_size(mpl);
2038                 length = length != 0 ? length : *mbp_buf_size;
2039                 if (*mbp_buf_size < length + offset) {
2040                         RTE_ETHDEV_LOG(ERR,
2041                                        "%s mbuf_data_room_size %u < %u (segment length=%u + segment offset=%u)\n",
2042                                        mpl->name, *mbp_buf_size,
2043                                        length + offset, length, offset);
2044                         return -EINVAL;
2045                 }
2046         }
2047         return 0;
2048 }
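
/*
 * Usage sketch (hypothetical application code): a two-segment buffer split
 * placing a 128-byte header segment in "hdr_pool" and the payload in
 * "pay_pool"; both pool names are placeholders assumed to exist.
 *
 *     struct rte_eth_rxseg_split segs[2] = {
 *         { .mp = hdr_pool, .length = 128, .offset = 0 },
 *         { .mp = pay_pool, .length = 0,   .offset = 0 },
 *     };
 *     struct rte_eth_rxconf rxconf = dev_info.default_rxconf;
 *
 *     rxconf.rx_seg = (union rte_eth_rxseg *)segs;
 *     rxconf.rx_nseg = 2;
 *     rxconf.offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
 *
 * A segment length of 0 means "use the pool's buffer size", as in the check
 * above. The configuration is then passed to rte_eth_rx_queue_setup() with
 * a NULL mempool, which routes it through this split validation.
 */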
2049
2050 int
2051 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2052                        uint16_t nb_rx_desc, unsigned int socket_id,
2053                        const struct rte_eth_rxconf *rx_conf,
2054                        struct rte_mempool *mp)
2055 {
2056         int ret;
2057         uint32_t mbp_buf_size;
2058         struct rte_eth_dev *dev;
2059         struct rte_eth_dev_info dev_info;
2060         struct rte_eth_rxconf local_conf;
2061
2062         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2063         dev = &rte_eth_devices[port_id];
2064
2065         if (rx_queue_id >= dev->data->nb_rx_queues) {
2066                 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id);
2067                 return -EINVAL;
2068         }
2069
2070         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
2071
2072         ret = rte_eth_dev_info_get(port_id, &dev_info);
2073         if (ret != 0)
2074                 return ret;
2075
2076         if (mp != NULL) {
2077                 /* Single pool configuration check. */
2078                 if (rx_conf != NULL && rx_conf->rx_nseg != 0) {
2079                         RTE_ETHDEV_LOG(ERR,
2080                                        "Ambiguous segment configuration\n");
2081                         return -EINVAL;
2082                 }
2083                 /*
2084                  * Check the size of the mbuf data buffer, this value
2085                  * must be provided in the private data of the memory pool.
2086                  * First check that the memory pool(s) has a valid private data.
2087                  */
2088                 if (mp->private_data_size <
2089                                 sizeof(struct rte_pktmbuf_pool_private)) {
2090                         RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n",
2091                                 mp->name, mp->private_data_size,
2092                                 (unsigned int)
2093                                 sizeof(struct rte_pktmbuf_pool_private));
2094                         return -ENOSPC;
2095                 }
2096                 mbp_buf_size = rte_pktmbuf_data_room_size(mp);
2097                 if (mbp_buf_size < dev_info.min_rx_bufsize +
2098                                    RTE_PKTMBUF_HEADROOM) {
2099                         RTE_ETHDEV_LOG(ERR,
2100                                        "%s mbuf_data_room_size %u < %u (RTE_PKTMBUF_HEADROOM=%u + min_rx_bufsize(dev)=%u)\n",
2101                                        mp->name, mbp_buf_size,
2102                                        RTE_PKTMBUF_HEADROOM +
2103                                        dev_info.min_rx_bufsize,
2104                                        RTE_PKTMBUF_HEADROOM,
2105                                        dev_info.min_rx_bufsize);
2106                         return -EINVAL;
2107                 }
2108         } else {
2109                 const struct rte_eth_rxseg_split *rx_seg;
2110                 uint16_t n_seg;
2111
2112                 /* Extended multi-segment configuration check. */
2113                 if (rx_conf == NULL || rx_conf->rx_seg == NULL || rx_conf->rx_nseg == 0) {
2114                         RTE_ETHDEV_LOG(ERR,
2115                                        "Memory pool is null and no extended configuration provided\n");
2116                         return -EINVAL;
2117                 }
2118
2119                 rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg;
2120                 n_seg = rx_conf->rx_nseg;
2121
2122                 if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
2123                         ret = rte_eth_rx_queue_check_split(rx_seg, n_seg,
2124                                                            &mbp_buf_size,
2125                                                            &dev_info);
2126                         if (ret != 0)
2127                                 return ret;
2128                 } else {
2129                         RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n");
2130                         return -EINVAL;
2131                 }
2132         }
2133
2134         /* Use default specified by driver, if nb_rx_desc is zero */
2135         if (nb_rx_desc == 0) {
2136                 nb_rx_desc = dev_info.default_rxportconf.ring_size;
2137                 /* If driver default is also zero, fall back on EAL default */
2138                 if (nb_rx_desc == 0)
2139                         nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
2140         }
2141
2142         if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
2143                         nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
2144                         nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
2146                 RTE_ETHDEV_LOG(ERR,
2147                         "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n",
2148                         nb_rx_desc, dev_info.rx_desc_lim.nb_max,
2149                         dev_info.rx_desc_lim.nb_min,
2150                         dev_info.rx_desc_lim.nb_align);
2151                 return -EINVAL;
2152         }
2153
2154         if (dev->data->dev_started &&
2155                 !(dev_info.dev_capa &
2156                         RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
2157                 return -EBUSY;
2158
2159         if (dev->data->dev_started &&
2160                 (dev->data->rx_queue_state[rx_queue_id] !=
2161                         RTE_ETH_QUEUE_STATE_STOPPED))
2162                 return -EBUSY;
2163
2164         eth_dev_rxq_release(dev, rx_queue_id);
2165
2166         if (rx_conf == NULL)
2167                 rx_conf = &dev_info.default_rxconf;
2168
2169         local_conf = *rx_conf;
2170
2171         /*
2172          * If an offload has already been enabled in
2173          * rte_eth_dev_configure(), it has been enabled on all queues,
2174          * so there is no need to enable it on this queue again.
2175          * The local_conf.offloads input to the underlying PMD only carries
2176          * those offloads which are enabled only on this queue and
2177          * not on all queues.
2178          */
2179         local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
2180
2181         /*
2182          * Offloads newly added for this queue are those not enabled in
2183          * rte_eth_dev_configure(), and they must be of a per-queue type.
2184          * A pure per-port offload can't be enabled on a queue while
2185          * disabled on another queue. Nor can a pure per-port offload be
2186          * newly enabled for any single queue if it hasn't been
2187          * enabled in rte_eth_dev_configure().
2188          */
2189         if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
2190              local_conf.offloads) {
2191                 RTE_ETHDEV_LOG(ERR,
2192                         "Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
2193                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2194                         port_id, rx_queue_id, local_conf.offloads,
2195                         dev_info.rx_queue_offload_capa,
2196                         __func__);
2197                 return -EINVAL;
2198         }
2199
2200         if (local_conf.share_group > 0 &&
2201             (dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) == 0) {
2202                 RTE_ETHDEV_LOG(ERR,
2203                         "Ethdev port_id=%d rx_queue_id=%d, enabled share_group=%hu while device doesn't support Rx queue share\n",
2204                         port_id, rx_queue_id, local_conf.share_group);
2205                 return -EINVAL;
2206         }
2207
2208         /*
2209          * If LRO is enabled, check that the maximum aggregated packet
2210          * size is supported by the configured device.
2211          */
2212         /* Get the real Ethernet overhead length */
2213         if (local_conf.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
2214                 uint32_t overhead_len;
2215                 uint32_t max_rx_pktlen;
2216                 int ret;
2217
2218                 overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
2219                                 dev_info.max_mtu);
2220                 max_rx_pktlen = dev->data->mtu + overhead_len;
2221                 if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0)
2222                         dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
2223                 ret = eth_dev_check_lro_pkt_size(port_id,
2224                                 dev->data->dev_conf.rxmode.max_lro_pkt_size,
2225                                 max_rx_pktlen,
2226                                 dev_info.max_lro_pkt_size);
2227                 if (ret != 0)
2228                         return ret;
2229         }
2230
2231         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
2232                                               socket_id, &local_conf, mp);
2233         if (!ret) {
2234                 if (!dev->data->min_rx_buf_size ||
2235                     dev->data->min_rx_buf_size > mbp_buf_size)
2236                         dev->data->min_rx_buf_size = mbp_buf_size;
2237         }
2238
2239         rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp,
2240                 rx_conf, ret);
2241         return eth_err(port_id, ret);
2242 }
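
/*
 * Usage sketch (hypothetical application code): a single-pool Rx queue
 * setup; "mb_pool" is an assumed, already-created pktmbuf pool.
 *
 *     ret = rte_eth_rx_queue_setup(port_id, 0, 1024,
 *                                  rte_eth_dev_socket_id(port_id),
 *                                  NULL, mb_pool);
 *
 * Passing a NULL rx_conf selects dev_info.default_rxconf, as implemented
 * above; an nb_rx_desc of 0 likewise falls back to the driver or EAL
 * default ring size.
 */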
2243
2244 int
2245 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2246                                uint16_t nb_rx_desc,
2247                                const struct rte_eth_hairpin_conf *conf)
2248 {
2249         int ret;
2250         struct rte_eth_dev *dev;
2251         struct rte_eth_hairpin_cap cap;
2252         int i;
2253         int count;
2254
2255         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2256         dev = &rte_eth_devices[port_id];
2257
2258         if (rx_queue_id >= dev->data->nb_rx_queues) {
2259                 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id);
2260                 return -EINVAL;
2261         }
2262
2263         if (conf == NULL) {
2264                 RTE_ETHDEV_LOG(ERR,
2265                         "Cannot setup ethdev port %u Rx hairpin queue from NULL config\n",
2266                         port_id);
2267                 return -EINVAL;
2268         }
2269
2270         ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2271         if (ret != 0)
2272                 return ret;
2273         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_hairpin_queue_setup,
2274                                 -ENOTSUP);
2275         /* if nb_rx_desc is zero use max number of desc from the driver. */
2276         if (nb_rx_desc == 0)
2277                 nb_rx_desc = cap.max_nb_desc;
2278         if (nb_rx_desc > cap.max_nb_desc) {
2279                 RTE_ETHDEV_LOG(ERR,
2280                         "Invalid value for nb_rx_desc(=%hu), should be: <= %hu",
2281                         nb_rx_desc, cap.max_nb_desc);
2282                 return -EINVAL;
2283         }
2284         if (conf->peer_count > cap.max_rx_2_tx) {
2285                 RTE_ETHDEV_LOG(ERR,
2286                         "Invalid value for number of peers for Rx queue(=%u), should be: <= %hu",
2287                         conf->peer_count, cap.max_rx_2_tx);
2288                 return -EINVAL;
2289         }
2290         if (conf->peer_count == 0) {
2291                 RTE_ETHDEV_LOG(ERR,
2292                         "Invalid value for number of peers for Rx queue(=%u), should be: > 0",
2293                         conf->peer_count);
2294                 return -EINVAL;
2295         }
2296         for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
2297              cap.max_nb_queues != UINT16_MAX; i++) {
2298                 if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
2299                         count++;
2300         }
2301         if (count > cap.max_nb_queues) {
2302                 RTE_ETHDEV_LOG(ERR, "To many Rx hairpin queues max is %d",
2303                 cap.max_nb_queues);
2304                 return -EINVAL;
2305         }
2306         if (dev->data->dev_started)
2307                 return -EBUSY;
2308         eth_dev_rxq_release(dev, rx_queue_id);
2309         ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
2310                                                       nb_rx_desc, conf);
2311         if (ret == 0)
2312                 dev->data->rx_queue_state[rx_queue_id] =
2313                         RTE_ETH_QUEUE_STATE_HAIRPIN;
2314         return eth_err(port_id, ret);
2315 }
2316
2317 int
2318 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2319                        uint16_t nb_tx_desc, unsigned int socket_id,
2320                        const struct rte_eth_txconf *tx_conf)
2321 {
2322         struct rte_eth_dev *dev;
2323         struct rte_eth_dev_info dev_info;
2324         struct rte_eth_txconf local_conf;
2325         int ret;
2326
2327         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2328         dev = &rte_eth_devices[port_id];
2329
2330         if (tx_queue_id >= dev->data->nb_tx_queues) {
2331                 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
2332                 return -EINVAL;
2333         }
2334
2335         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
2336
2337         ret = rte_eth_dev_info_get(port_id, &dev_info);
2338         if (ret != 0)
2339                 return ret;
2340
2341         /* Use default specified by driver, if nb_tx_desc is zero */
2342         if (nb_tx_desc == 0) {
2343                 nb_tx_desc = dev_info.default_txportconf.ring_size;
2344                 /* If driver default is zero, fall back on EAL default */
2345                 if (nb_tx_desc == 0)
2346                         nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
2347         }
2348         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
2349             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
2350             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
2351                 RTE_ETHDEV_LOG(ERR,
2352                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n",
2353                         nb_tx_desc, dev_info.tx_desc_lim.nb_max,
2354                         dev_info.tx_desc_lim.nb_min,
2355                         dev_info.tx_desc_lim.nb_align);
2356                 return -EINVAL;
2357         }
2358
2359         if (dev->data->dev_started &&
2360                 !(dev_info.dev_capa &
2361                         RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
2362                 return -EBUSY;
2363
2364         if (dev->data->dev_started &&
2365                 (dev->data->tx_queue_state[tx_queue_id] !=
2366                         RTE_ETH_QUEUE_STATE_STOPPED))
2367                 return -EBUSY;
2368
2369         eth_dev_txq_release(dev, tx_queue_id);
2370
2371         if (tx_conf == NULL)
2372                 tx_conf = &dev_info.default_txconf;
2373
2374         local_conf = *tx_conf;
2375
2376         /*
2377          * If an offload has already been enabled in
2378          * rte_eth_dev_configure(), it has been enabled on all queues,
2379          * so there is no need to enable it on this queue again.
2380          * The local_conf.offloads input to the underlying PMD only carries
2381          * those offloads which are enabled only on this queue and
2382          * not on all queues.
2383          */
2384         local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
2385
2386         /*
2387          * Offloads newly added for this queue are those not enabled in
2388          * rte_eth_dev_configure(), and they must be of a per-queue type.
2389          * A pure per-port offload can't be enabled on a queue while
2390          * disabled on another queue. Nor can a pure per-port offload be
2391          * newly enabled for any single queue if it hasn't been
2392          * enabled in rte_eth_dev_configure().
2393          */
2394         if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
2395              local_conf.offloads) {
2396                 RTE_ETHDEV_LOG(ERR,
2397                         "Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
2398                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2399                         port_id, tx_queue_id, local_conf.offloads,
2400                         dev_info.tx_queue_offload_capa,
2401                         __func__);
2402                 return -EINVAL;
2403         }
2404
2405         rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf);
2406         return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
2407                        tx_queue_id, nb_tx_desc, socket_id, &local_conf));
2408 }
2409
2410 int
2411 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2412                                uint16_t nb_tx_desc,
2413                                const struct rte_eth_hairpin_conf *conf)
2414 {
2415         struct rte_eth_dev *dev;
2416         struct rte_eth_hairpin_cap cap;
2417         int i;
2418         int count;
2419         int ret;
2420
2421         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2422         dev = &rte_eth_devices[port_id];
2423
2424         if (tx_queue_id >= dev->data->nb_tx_queues) {
2425                 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
2426                 return -EINVAL;
2427         }
2428
2429         if (conf == NULL) {
2430                 RTE_ETHDEV_LOG(ERR,
2431                         "Cannot setup ethdev port %u Tx hairpin queue from NULL config\n",
2432                         port_id);
2433                 return -EINVAL;
2434         }
2435
2436         ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2437         if (ret != 0)
2438                 return ret;
2439         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup,
2440                                 -ENOTSUP);
2441         /* if nb_tx_desc is zero use max number of desc from the driver. */
2442         if (nb_tx_desc == 0)
2443                 nb_tx_desc = cap.max_nb_desc;
2444         if (nb_tx_desc > cap.max_nb_desc) {
2445                 RTE_ETHDEV_LOG(ERR,
2446                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu",
2447                         nb_tx_desc, cap.max_nb_desc);
2448                 return -EINVAL;
2449         }
2450         if (conf->peer_count > cap.max_tx_2_rx) {
2451                 RTE_ETHDEV_LOG(ERR,
2452                         "Invalid value for number of peers for Tx queue(=%u), should be: <= %hu",
2453                         conf->peer_count, cap.max_tx_2_rx);
2454                 return -EINVAL;
2455         }
2456         if (conf->peer_count == 0) {
2457                 RTE_ETHDEV_LOG(ERR,
2458                         "Invalid value for number of peers for Tx queue(=%u), should be: > 0",
2459                         conf->peer_count);
2460                 return -EINVAL;
2461         }
2462         for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
2463              cap.max_nb_queues != UINT16_MAX; i++) {
2464                 if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
2465                         count++;
2466         }
2467         if (count > cap.max_nb_queues) {
2468                 RTE_ETHDEV_LOG(ERR, "To many Tx hairpin queues max is %d",
2469                 cap.max_nb_queues);
2470                 return -EINVAL;
2471         }
2472         if (dev->data->dev_started)
2473                 return -EBUSY;
2474         eth_dev_txq_release(dev, tx_queue_id);
2475         ret = (*dev->dev_ops->tx_hairpin_queue_setup)
2476                 (dev, tx_queue_id, nb_tx_desc, conf);
2477         if (ret == 0)
2478                 dev->data->tx_queue_state[tx_queue_id] =
2479                         RTE_ETH_QUEUE_STATE_HAIRPIN;
2480         return eth_err(port_id, ret);
2481 }
2482
2483 int
2484 rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
2485 {
2486         struct rte_eth_dev *dev;
2487         int ret;
2488
2489         RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2490         dev = &rte_eth_devices[tx_port];
2491
2492         if (dev->data->dev_started == 0) {
2493                 RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port);
2494                 return -EBUSY;
2495         }
2496
2497         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_bind, -ENOTSUP);
2498         ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port);
2499         if (ret != 0)
2500                 RTE_ETHDEV_LOG(ERR, "Failed to bind hairpin Tx %d"
2501                                " to Rx %d (%d - all ports)\n",
2502                                tx_port, rx_port, RTE_MAX_ETHPORTS);
2503
2504         return ret;
2505 }
2506
2507 int
2508 rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
2509 {
2510         struct rte_eth_dev *dev;
2511         int ret;
2512
2513         RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2514         dev = &rte_eth_devices[tx_port];
2515
2516         if (dev->data->dev_started == 0) {
2517                 RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port);
2518                 return -EBUSY;
2519         }
2520
2521         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_unbind, -ENOTSUP);
2522         ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port);
2523         if (ret != 0)
2524                 RTE_ETHDEV_LOG(ERR, "Failed to unbind hairpin Tx %d"
2525                                " from Rx %d (%d - all ports)\n",
2526                                tx_port, rx_port, RTE_MAX_ETHPORTS);
2527
2528         return ret;
2529 }
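
/*
 * Usage sketch (hypothetical application code): with both ports started and
 * their hairpin queues configured for manual binding, an application drives
 * the bind/unbind explicitly.
 *
 *     ret = rte_eth_hairpin_bind(tx_port, rx_port);
 *     ...
 *     ret = rte_eth_hairpin_unbind(tx_port, rx_port);
 *
 * As the log messages above note, passing RTE_MAX_ETHPORTS as the peer port
 * means "all ports".
 */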
2530
2531 int
2532 rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
2533                                size_t len, uint32_t direction)
2534 {
2535         struct rte_eth_dev *dev;
2536         int ret;
2537
2538         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2539         dev = &rte_eth_devices[port_id];
2540
2541         if (peer_ports == NULL) {
2542                 RTE_ETHDEV_LOG(ERR,
2543                         "Cannot get ethdev port %u hairpin peer ports to NULL\n",
2544                         port_id);
2545                 return -EINVAL;
2546         }
2547
2548         if (len == 0) {
2549                 RTE_ETHDEV_LOG(ERR,
2550                         "Cannot get ethdev port %u hairpin peer ports to array with zero size\n",
2551                         port_id);
2552                 return -EINVAL;
2553         }
2554
2555         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_get_peer_ports,
2556                                 -ENOTSUP);
2557
2558         ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports,
2559                                                       len, direction);
2560         if (ret < 0)
2561                 RTE_ETHDEV_LOG(ERR, "Failed to get %d hairpin peer %s ports\n",
2562                                port_id, direction ? "Rx" : "Tx");
2563
2564         return ret;
2565 }
2566
2567 void
2568 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
2569                 void *userdata __rte_unused)
2570 {
2571         rte_pktmbuf_free_bulk(pkts, unsent);
2572 }
2573
2574 void
2575 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
2576                 void *userdata)
2577 {
2578         uint64_t *count = userdata;
2579
2580         rte_pktmbuf_free_bulk(pkts, unsent);
2581         *count += unsent;
2582 }
2583
2584 int
2585 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
2586                 buffer_tx_error_fn cbfn, void *userdata)
2587 {
2588         if (buffer == NULL) {
2589                 RTE_ETHDEV_LOG(ERR,
2590                         "Cannot set Tx buffer error callback to NULL buffer\n");
2591                 return -EINVAL;
2592         }
2593
2594         buffer->error_callback = cbfn;
2595         buffer->error_userdata = userdata;
2596         return 0;
2597 }
2598
2599 int
2600 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
2601 {
2602         int ret = 0;
2603
2604         if (buffer == NULL) {
2605                 RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL buffer\n");
2606                 return -EINVAL;
2607         }
2608
2609         buffer->size = size;
2610         if (buffer->error_callback == NULL) {
2611                 ret = rte_eth_tx_buffer_set_err_callback(
2612                         buffer, rte_eth_tx_buffer_drop_callback, NULL);
2613         }
2614
2615         return ret;
2616 }
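
/*
 * Usage sketch (hypothetical application code): use the Tx buffering
 * helpers to count silently dropped packets instead of merely freeing them.
 *
 *     static uint64_t drop_count;
 *     struct rte_eth_dev_tx_buffer *buf;
 *
 *     buf = rte_zmalloc("tx_buf", RTE_ETH_TX_BUFFER_SIZE(32), 0);
 *     rte_eth_tx_buffer_init(buf, 32);
 *     rte_eth_tx_buffer_set_err_callback(buf,
 *                     rte_eth_tx_buffer_count_callback, &drop_count);
 *
 * Without an explicit callback, rte_eth_tx_buffer_init() installs
 * rte_eth_tx_buffer_drop_callback() by default, as implemented above.
 */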
2617
2618 int
2619 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
2620 {
2621         struct rte_eth_dev *dev;
2622         int ret;
2623
2624         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2625         dev = &rte_eth_devices[port_id];
2626
2627         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
2628
2629         /* Call driver to free pending mbufs. */
2630         ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
2631                                                free_cnt);
2632         return eth_err(port_id, ret);
2633 }
2634
2635 int
2636 rte_eth_promiscuous_enable(uint16_t port_id)
2637 {
2638         struct rte_eth_dev *dev;
2639         int diag = 0;
2640
2641         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2642         dev = &rte_eth_devices[port_id];
2643
2644         if (dev->data->promiscuous == 1)
2645                 return 0;
2646
2647         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP);
2648
2649         diag = (*dev->dev_ops->promiscuous_enable)(dev);
2650         dev->data->promiscuous = (diag == 0) ? 1 : 0;
2651
2652         return eth_err(port_id, diag);
2653 }
2654
2655 int
2656 rte_eth_promiscuous_disable(uint16_t port_id)
2657 {
2658         struct rte_eth_dev *dev;
2659         int diag = 0;
2660
2661         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2662         dev = &rte_eth_devices[port_id];
2663
2664         if (dev->data->promiscuous == 0)
2665                 return 0;
2666
2667         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP);
2668
2669         dev->data->promiscuous = 0;
2670         diag = (*dev->dev_ops->promiscuous_disable)(dev);
2671         if (diag != 0)
2672                 dev->data->promiscuous = 1;
2673
2674         return eth_err(port_id, diag);
2675 }
2676
2677 int
2678 rte_eth_promiscuous_get(uint16_t port_id)
2679 {
2680         struct rte_eth_dev *dev;
2681
2682         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2683         dev = &rte_eth_devices[port_id];
2684
2685         return dev->data->promiscuous;
2686 }
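
/*
 * Usage sketch (hypothetical application code): a check-and-enable pattern.
 *
 *     if (rte_eth_promiscuous_get(port_id) != 1)
 *         ret = rte_eth_promiscuous_enable(port_id);
 *
 * The explicit check is optional: as implemented above, the enable and
 * disable calls already return 0 early when the mode is unchanged.
 */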
2687
2688 int
2689 rte_eth_allmulticast_enable(uint16_t port_id)
2690 {
2691         struct rte_eth_dev *dev;
2692         int diag;
2693
2694         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2695         dev = &rte_eth_devices[port_id];
2696
2697         if (dev->data->all_multicast == 1)
2698                 return 0;
2699
2700         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP);
2701         diag = (*dev->dev_ops->allmulticast_enable)(dev);
2702         dev->data->all_multicast = (diag == 0) ? 1 : 0;
2703
2704         return eth_err(port_id, diag);
2705 }
2706
2707 int
2708 rte_eth_allmulticast_disable(uint16_t port_id)
2709 {
2710         struct rte_eth_dev *dev;
2711         int diag;
2712
2713         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2714         dev = &rte_eth_devices[port_id];
2715
2716         if (dev->data->all_multicast == 0)
2717                 return 0;
2718
2719         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP);
2720         dev->data->all_multicast = 0;
2721         diag = (*dev->dev_ops->allmulticast_disable)(dev);
2722         if (diag != 0)
2723                 dev->data->all_multicast = 1;
2724
2725         return eth_err(port_id, diag);
2726 }
2727
2728 int
2729 rte_eth_allmulticast_get(uint16_t port_id)
2730 {
2731         struct rte_eth_dev *dev;
2732
2733         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2734         dev = &rte_eth_devices[port_id];
2735
2736         return dev->data->all_multicast;
2737 }
2738
2739 int
2740 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
2741 {
2742         struct rte_eth_dev *dev;
2743
2744         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2745         dev = &rte_eth_devices[port_id];
2746
2747         if (eth_link == NULL) {
2748                 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n",
2749                         port_id);
2750                 return -EINVAL;
2751         }
2752
2753         if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started)
2754                 rte_eth_linkstatus_get(dev, eth_link);
2755         else {
2756                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2757                 (*dev->dev_ops->link_update)(dev, 1);
2758                 *eth_link = dev->data->dev_link;
2759         }
2760
2761         return 0;
2762 }
2763
2764 int
2765 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
2766 {
2767         struct rte_eth_dev *dev;
2768
2769         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2770         dev = &rte_eth_devices[port_id];
2771
2772         if (eth_link == NULL) {
2773                 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n",
2774                         port_id);
2775                 return -EINVAL;
2776         }
2777
2778         if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started)
2779                 rte_eth_linkstatus_get(dev, eth_link);
2780         else {
2781                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2782                 (*dev->dev_ops->link_update)(dev, 0);
2783                 *eth_link = dev->data->dev_link;
2784         }
2785
2786         return 0;
2787 }
2788
2789 const char *
2790 rte_eth_link_speed_to_str(uint32_t link_speed)
2791 {
2792         switch (link_speed) {
2793         case RTE_ETH_SPEED_NUM_NONE: return "None";
2794         case RTE_ETH_SPEED_NUM_10M:  return "10 Mbps";
2795         case RTE_ETH_SPEED_NUM_100M: return "100 Mbps";
2796         case RTE_ETH_SPEED_NUM_1G:   return "1 Gbps";
2797         case RTE_ETH_SPEED_NUM_2_5G: return "2.5 Gbps";
2798         case RTE_ETH_SPEED_NUM_5G:   return "5 Gbps";
2799         case RTE_ETH_SPEED_NUM_10G:  return "10 Gbps";
2800         case RTE_ETH_SPEED_NUM_20G:  return "20 Gbps";
2801         case RTE_ETH_SPEED_NUM_25G:  return "25 Gbps";
2802         case RTE_ETH_SPEED_NUM_40G:  return "40 Gbps";
2803         case RTE_ETH_SPEED_NUM_50G:  return "50 Gbps";
2804         case RTE_ETH_SPEED_NUM_56G:  return "56 Gbps";
2805         case RTE_ETH_SPEED_NUM_100G: return "100 Gbps";
2806         case RTE_ETH_SPEED_NUM_200G: return "200 Gbps";
2807         case RTE_ETH_SPEED_NUM_UNKNOWN: return "Unknown";
2808         default: return "Invalid";
2809         }
2810 }
2811
2812 int
2813 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
2814 {
2815         if (str == NULL) {
2816                 RTE_ETHDEV_LOG(ERR, "Cannot convert link to NULL string\n");
2817                 return -EINVAL;
2818         }
2819
2820         if (len == 0) {
2821                 RTE_ETHDEV_LOG(ERR,
2822                         "Cannot convert link to string with zero size\n");
2823                 return -EINVAL;
2824         }
2825
2826         if (eth_link == NULL) {
2827                 RTE_ETHDEV_LOG(ERR, "Cannot convert to string from NULL link\n");
2828                 return -EINVAL;
2829         }
2830
2831         if (eth_link->link_status == RTE_ETH_LINK_DOWN)
2832                 return snprintf(str, len, "Link down");
2833         else
2834                 return snprintf(str, len, "Link up at %s %s %s",
2835                         rte_eth_link_speed_to_str(eth_link->link_speed),
2836                         (eth_link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
2837                         "FDX" : "HDX",
2838                         (eth_link->link_autoneg == RTE_ETH_LINK_AUTONEG) ?
2839                         "Autoneg" : "Fixed");
2840 }
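
/*
 * Usage sketch (hypothetical application code): query the link without
 * waiting and format it for display.
 *
 *     struct rte_eth_link link;
 *     char buf[RTE_ETH_LINK_MAX_STR_LEN];
 *
 *     if (rte_eth_link_get_nowait(port_id, &link) == 0) {
 *         rte_eth_link_to_str(buf, sizeof(buf), &link);
 *         printf("Port %u: %s\n", port_id, buf);
 *     }
 */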
2841
2842 int
2843 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
2844 {
2845         struct rte_eth_dev *dev;
2846
2847         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2848         dev = &rte_eth_devices[port_id];
2849
2850         if (stats == NULL) {
2851                 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u stats to NULL\n",
2852                         port_id);
2853                 return -EINVAL;
2854         }
2855
2856         memset(stats, 0, sizeof(*stats));
2857
2858         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
2859         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
2860         return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
2861 }
2862
2863 int
2864 rte_eth_stats_reset(uint16_t port_id)
2865 {
2866         struct rte_eth_dev *dev;
2867         int ret;
2868
2869         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2870         dev = &rte_eth_devices[port_id];
2871
2872         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
2873         ret = (*dev->dev_ops->stats_reset)(dev);
2874         if (ret != 0)
2875                 return eth_err(port_id, ret);
2876
2877         dev->data->rx_mbuf_alloc_failed = 0;
2878
2879         return 0;
2880 }
2881
2882 static inline int
2883 eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev)
2884 {
2885         uint16_t nb_rxqs, nb_txqs;
2886         int count;
2887
2888         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2889         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2890
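        /*
         * Only the first RTE_ETHDEV_QUEUE_STAT_CNTRS queues have per-queue
         * basic counters; queues beyond that limit are not represented in
         * the basic xstats count.
         */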
2891         count = RTE_NB_STATS;
2892         if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) {
2893                 count += nb_rxqs * RTE_NB_RXQ_STATS;
2894                 count += nb_txqs * RTE_NB_TXQ_STATS;
2895         }
2896
2897         return count;
2898 }
2899
2900 static int
2901 eth_dev_get_xstats_count(uint16_t port_id)
2902 {
2903         struct rte_eth_dev *dev;
2904         int count;
2905
2906         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2907         dev = &rte_eth_devices[port_id];
2908         if (dev->dev_ops->xstats_get_names != NULL) {
2909                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
2910                 if (count < 0)
2911                         return eth_err(port_id, count);
2912         } else
2913                 count = 0;
2914
2915
2916         count += eth_dev_get_xstats_basic_count(dev);
2917
2918         return count;
2919 }
2920
2921 int
2922 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
2923                 uint64_t *id)
2924 {
2925         int cnt_xstats, idx_xstat;
2926
2927         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2928
2929         if (xstat_name == NULL) {
2930                 RTE_ETHDEV_LOG(ERR,
2931                         "Cannot get ethdev port %u xstats ID from NULL xstat name\n",
2932                         port_id);
2933                 return -EINVAL;
2934         }
2935
2936         if (id == NULL) {
2937                 RTE_ETHDEV_LOG(ERR,
2938                         "Cannot get ethdev port %u xstats ID to NULL\n",
2939                         port_id);
2940                 return -EINVAL;
2941         }
2942
2943         /* Get count */
2944         cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
2945         if (cnt_xstats < 0) {
2946                 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n");
2947                 return -ENODEV;
2948         }
2949
2950         /* Get id-name lookup table */
2951         struct rte_eth_xstat_name xstats_names[cnt_xstats];
2952
2953         if (cnt_xstats != rte_eth_xstats_get_names_by_id(
2954                         port_id, xstats_names, cnt_xstats, NULL)) {
2955                 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n");
2956                 return -1;
2957         }
2958
2959         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
2960                 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
2961                         *id = idx_xstat;
2962                         return 0;
2963                 }
2964         }
2965
2966         return -EINVAL;
2967 }
2968
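/*
 * Editor's note: a hedged sketch pairing rte_eth_xstats_get_id_by_name()
 * with rte_eth_xstats_get_by_id() (defined later in this file);
 * "rx_good_packets" is one of the basic stat names registered above:
 *
 *	uint64_t id, value;
 *
 *	if (rte_eth_xstats_get_id_by_name(port_id, "rx_good_packets", &id) == 0 &&
 *	    rte_eth_xstats_get_by_id(port_id, &id, &value, 1) == 1)
 *		printf("rx_good_packets = %" PRIu64 "\n", value);
 */
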
2969 /* retrieve basic stats names */
2970 static int
2971 eth_basic_stats_get_names(struct rte_eth_dev *dev,
2972         struct rte_eth_xstat_name *xstats_names)
2973 {
2974         int cnt_used_entries = 0;
2975         uint32_t idx, id_queue;
2976         uint16_t num_q;
2977
2978         for (idx = 0; idx < RTE_NB_STATS; idx++) {
2979                 strlcpy(xstats_names[cnt_used_entries].name,
2980                         eth_dev_stats_strings[idx].name,
2981                         sizeof(xstats_names[0].name));
2982                 cnt_used_entries++;
2983         }
2984
2985         if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
2986                 return cnt_used_entries;
2987
2988         num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2989         for (id_queue = 0; id_queue < num_q; id_queue++) {
2990                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
2991                         snprintf(xstats_names[cnt_used_entries].name,
2992                                 sizeof(xstats_names[0].name),
2993                                 "rx_q%u_%s",
2994                                 id_queue, eth_dev_rxq_stats_strings[idx].name);
2995                         cnt_used_entries++;
2996                 }
2997
2998         }
2999         num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3000         for (id_queue = 0; id_queue < num_q; id_queue++) {
3001                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
3002                         snprintf(xstats_names[cnt_used_entries].name,
3003                                 sizeof(xstats_names[0].name),
3004                                 "tx_q%u_%s",
3005                                 id_queue, eth_dev_txq_stats_strings[idx].name);
3006                         cnt_used_entries++;
3007                 }
3008         }
3009         return cnt_used_entries;
3010 }
3011
3012 /* retrieve ethdev extended statistics names */
3013 int
3014 rte_eth_xstats_get_names_by_id(uint16_t port_id,
3015         struct rte_eth_xstat_name *xstats_names, unsigned int size,
3016         uint64_t *ids)
3017 {
3018         struct rte_eth_xstat_name *xstats_names_copy;
3019         unsigned int no_basic_stat_requested = 1;
3020         unsigned int no_ext_stat_requested = 1;
3021         unsigned int expected_entries;
3022         unsigned int basic_count;
3023         struct rte_eth_dev *dev;
3024         unsigned int i;
3025         int ret;
3026
3027         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3028         dev = &rte_eth_devices[port_id];
3029
3030         basic_count = eth_dev_get_xstats_basic_count(dev);
3031         ret = eth_dev_get_xstats_count(port_id);
3032         if (ret < 0)
3033                 return ret;
3034         expected_entries = (unsigned int)ret;
3035
3036         /* Return max number of stats if no ids given */
3037         if (!ids) {
3038                 if (!xstats_names)
3039                         return expected_entries;
3040                 else if (size < expected_entries)
3041                         return expected_entries;
3042         }
3043
3044         if (ids && !xstats_names)
3045                 return -EINVAL;
3046
3047         if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
3048                 uint64_t ids_copy[size];
3049
3050                 for (i = 0; i < size; i++) {
3051                         if (ids[i] < basic_count) {
3052                                 no_basic_stat_requested = 0;
3053                                 break;
3054                         }
3055
3056                         /*
3057                          * Convert ids to the xstats ids known by the PMD;
3058                          * ids seen by the user span basic + driver xstats.
3059                          */
3060                         ids_copy[i] = ids[i] - basic_count;
3061                 }
3062
3063                 if (no_basic_stat_requested)
3064                         return (*dev->dev_ops->xstats_get_names_by_id)(dev,
3065                                         ids_copy, xstats_names, size);
3066         }
3067
3068         /* Retrieve all stats */
3069         if (!ids) {
3070                 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
3071                                 expected_entries);
3072                 if (num_stats < 0 || num_stats > (int)expected_entries)
3073                         return num_stats;
3074                 else
3075                         return expected_entries;
3076         }
3077
3078         xstats_names_copy = calloc(expected_entries,
3079                 sizeof(struct rte_eth_xstat_name));
3080
3081         if (!xstats_names_copy) {
3082                 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n");
3083                 return -ENOMEM;
3084         }
3085
3086         if (ids) {
3087                 for (i = 0; i < size; i++) {
3088                         if (ids[i] >= basic_count) {
3089                                 no_ext_stat_requested = 0;
3090                                 break;
3091                         }
3092                 }
3093         }
3094
3095         /* Fill xstats_names_copy structure */
3096         if (ids && no_ext_stat_requested) {
3097                 eth_basic_stats_get_names(dev, xstats_names_copy);
3098         } else {
3099                 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
3100                         expected_entries);
3101                 if (ret < 0) {
3102                         free(xstats_names_copy);
3103                         return ret;
3104                 }
3105         }
3106
3107         /* Filter stats */
3108         for (i = 0; i < size; i++) {
3109                 if (ids[i] >= expected_entries) {
3110                         RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
3111                         free(xstats_names_copy);
3112                         return -1;
3113                 }
3114                 xstats_names[i] = xstats_names_copy[ids[i]];
3115         }
3116
3117         free(xstats_names_copy);
3118         return size;
3119 }
3120
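/*
 * Editor's note: the function above follows the usual two-call pattern of
 * this API: query the required size first, then fetch. A hedged sketch:
 *
 *	int n = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
 *
 *	if (n > 0) {
 *		struct rte_eth_xstat_name *names = malloc(n * sizeof(*names));
 *
 *		if (names != NULL &&
 *		    rte_eth_xstats_get_names_by_id(port_id, names, n, NULL) == n)
 *			; // names[0..n-1] now hold every xstat name
 *		free(names);
 *	}
 */
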
3121 int
3122 rte_eth_xstats_get_names(uint16_t port_id,
3123         struct rte_eth_xstat_name *xstats_names,
3124         unsigned int size)
3125 {
3126         struct rte_eth_dev *dev;
3127         int cnt_used_entries;
3128         int cnt_expected_entries;
3129         int cnt_driver_entries;
3130
3131         cnt_expected_entries = eth_dev_get_xstats_count(port_id);
3132         if (xstats_names == NULL || cnt_expected_entries < 0 ||
3133                         (int)size < cnt_expected_entries)
3134                 return cnt_expected_entries;
3135
3136         /* port_id checked in eth_dev_get_xstats_count() */
3137         dev = &rte_eth_devices[port_id];
3138
3139         cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names);
3140
3141         if (dev->dev_ops->xstats_get_names != NULL) {
3142                 /* If there are any driver-specific xstats, append them
3143                  * to the end of the list.
3144                  */
3145                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
3146                         dev,
3147                         xstats_names + cnt_used_entries,
3148                         size - cnt_used_entries);
3149                 if (cnt_driver_entries < 0)
3150                         return eth_err(port_id, cnt_driver_entries);
3151                 cnt_used_entries += cnt_driver_entries;
3152         }
3153
3154         return cnt_used_entries;
3155 }
3156
3157
3158 static int
3159 eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
3160 {
3161         struct rte_eth_dev *dev;
3162         struct rte_eth_stats eth_stats;
3163         unsigned int count = 0, i, q;
3164         uint64_t val, *stats_ptr;
3165         uint16_t nb_rxqs, nb_txqs;
3166         int ret;
3167
3168         ret = rte_eth_stats_get(port_id, &eth_stats);
3169         if (ret < 0)
3170                 return ret;
3171
3172         dev = &rte_eth_devices[port_id];
3173
3174         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3175         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3176
3177         /* global stats */
3178         for (i = 0; i < RTE_NB_STATS; i++) {
3179                 stats_ptr = RTE_PTR_ADD(&eth_stats,
3180                                         eth_dev_stats_strings[i].offset);
3181                 val = *stats_ptr;
3182                 xstats[count++].value = val;
3183         }
3184
3185         if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
3186                 return count;
3187
3188         /* per-rxq stats */
3189         for (q = 0; q < nb_rxqs; q++) {
3190                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
3191                         stats_ptr = RTE_PTR_ADD(&eth_stats,
3192                                         eth_dev_rxq_stats_strings[i].offset +
3193                                         q * sizeof(uint64_t));
3194                         val = *stats_ptr;
3195                         xstats[count++].value = val;
3196                 }
3197         }
3198
3199         /* per-txq stats */
3200         for (q = 0; q < nb_txqs; q++) {
3201                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
3202                         stats_ptr = RTE_PTR_ADD(&eth_stats,
3203                                         eth_dev_txq_stats_strings[i].offset +
3204                                         q * sizeof(uint64_t));
3205                         val = *stats_ptr;
3206                         xstats[count++].value = val;
3207                 }
3208         }
3209         return count;
3210 }
3211
3212 /* retrieve ethdev extended statistics */
3213 int
3214 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
3215                          uint64_t *values, unsigned int size)
3216 {
3217         unsigned int no_basic_stat_requested = 1;
3218         unsigned int no_ext_stat_requested = 1;
3219         unsigned int num_xstats_filled;
3220         unsigned int basic_count;
3221         uint16_t expected_entries;
3222         struct rte_eth_dev *dev;
3223         unsigned int i;
3224         int ret;
3225
3226         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3227         dev = &rte_eth_devices[port_id];
3228
3229         ret = eth_dev_get_xstats_count(port_id);
3230         if (ret < 0)
3231                 return ret;
3232         expected_entries = (uint16_t)ret;
3233         struct rte_eth_xstat xstats[expected_entries];
3234         basic_count = eth_dev_get_xstats_basic_count(dev);
3235
3236         /* Return max number of stats if no ids given */
3237         if (!ids) {
3238                 if (!values)
3239                         return expected_entries;
3240                 else if (size < expected_entries)
3241                         return expected_entries;
3242         }
3243
3244         if (ids && !values)
3245                 return -EINVAL;
3246
3247         if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
3248                 /* basic_count was already computed above for this device */
3249                 uint64_t ids_copy[size];
3250
3251                 for (i = 0; i < size; i++) {
3252                         if (ids[i] < basic_count) {
3253                                 no_basic_stat_requested = 0;
3254                                 break;
3255                         }
3256
3257                         /*
3258                          * Convert ids to the xstats ids known by the PMD;
3259                          * ids seen by the user span basic + driver xstats.
3260                          */
3261                         ids_copy[i] = ids[i] - basic_count;
3262                 }
3263
3264                 if (no_basic_stat_requested)
3265                         return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
3266                                         values, size);
3267         }
3268
3269         if (ids) {
3270                 for (i = 0; i < size; i++) {
3271                         if (ids[i] >= basic_count) {
3272                                 no_ext_stat_requested = 0;
3273                                 break;
3274                         }
3275                 }
3276         }
3277
3278         /* Fill the xstats structure */
3279         if (ids && no_ext_stat_requested)
3280                 ret = eth_basic_stats_get(port_id, xstats);
3281         else
3282                 ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
3283
3284         if (ret < 0)
3285                 return ret;
3286         num_xstats_filled = (unsigned int)ret;
3287
3288         /* Return all stats */
3289         if (!ids) {
3290                 for (i = 0; i < num_xstats_filled; i++)
3291                         values[i] = xstats[i].value;
3292                 return expected_entries;
3293         }
3294
3295         /* Filter stats */
3296         for (i = 0; i < size; i++) {
3297                 if (ids[i] >= expected_entries) {
3298                         RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
3299                         return -1;
3300                 }
3301                 values[i] = xstats[ids[i]].value;
3302         }
3303         return size;
3304 }
3305
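/*
 * Editor's note: user-visible xstat ids are the basic stats first, then
 * driver-specific ones (see the id conversion above). A sketch reading two
 * ids at once; values come back in the same order as the ids array:
 *
 *	uint64_t ids[2] = { 0, 1 };	// the first two basic stats
 *	uint64_t vals[2];
 *
 *	if (rte_eth_xstats_get_by_id(port_id, ids, vals, 2) == 2)
 *		printf("xstat0=%" PRIu64 " xstat1=%" PRIu64 "\n",
 *			vals[0], vals[1]);
 */
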
3306 int
3307 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
3308         unsigned int n)
3309 {
3310         struct rte_eth_dev *dev;
3311         unsigned int count = 0, i;
3312         signed int xcount = 0;
3313         uint16_t nb_rxqs, nb_txqs;
3314         int ret;
3315
3316         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3317         dev = &rte_eth_devices[port_id];
3318
3319         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3320         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3321
3322         /* Return generic statistics */
3323         count = RTE_NB_STATS;
3324         if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS)
3325                 count += (nb_rxqs * RTE_NB_RXQ_STATS) + (nb_txqs * RTE_NB_TXQ_STATS);
3326
3327         /* implemented by the driver */
3328         if (dev->dev_ops->xstats_get != NULL) {
3329                 /* Ask the driver to append its xstats at the end of
3330                  * the xstats array.
3331                  */
3332                 xcount = (*dev->dev_ops->xstats_get)(dev,
3333                                      xstats ? xstats + count : NULL,
3334                                      (n > count) ? n - count : 0);
3335
3336                 if (xcount < 0)
3337                         return eth_err(port_id, xcount);
3338         }
3339
3340         if (n < count + xcount || xstats == NULL)
3341                 return count + xcount;
3342
3343         /* now fill the xstats structure */
3344         ret = eth_basic_stats_get(port_id, xstats);
3345         if (ret < 0)
3346                 return ret;
3347         count = ret;
3348
3349         for (i = 0; i < count; i++)
3350                 xstats[i].id = i;
3351         /* add an offset to driver-specific stats */
3352         for ( ; i < count + xcount; i++)
3353                 xstats[i].id += count;
3354
3355         return count + xcount;
3356 }
3357
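/*
 * Editor's note: a hedged sketch dumping all xstats by pairing
 * rte_eth_xstats_get_names() with rte_eth_xstats_get(); both report the
 * same count, and xstats[i].id indexes into the names array:
 *
 *	int i, n = rte_eth_xstats_get(port_id, NULL, 0);
 *	struct rte_eth_xstat *xs = n > 0 ? malloc(n * sizeof(*xs)) : NULL;
 *	struct rte_eth_xstat_name *nm = n > 0 ? malloc(n * sizeof(*nm)) : NULL;
 *
 *	if (xs != NULL && nm != NULL &&
 *	    rte_eth_xstats_get_names(port_id, nm, n) == n &&
 *	    rte_eth_xstats_get(port_id, xs, n) == n)
 *		for (i = 0; i < n; i++)
 *			printf("%s: %" PRIu64 "\n",
 *				nm[xs[i].id].name, xs[i].value);
 *	free(xs);
 *	free(nm);
 */
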
3358 /* reset ethdev extended statistics */
3359 int
3360 rte_eth_xstats_reset(uint16_t port_id)
3361 {
3362         struct rte_eth_dev *dev;
3363
3364         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3365         dev = &rte_eth_devices[port_id];
3366
3367         /* implemented by the driver */
3368         if (dev->dev_ops->xstats_reset != NULL)
3369                 return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev));
3370
3371         /* fallback to default */
3372         return rte_eth_stats_reset(port_id);
3373 }
3374
3375 static int
3376 eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id,
3377                 uint8_t stat_idx, uint8_t is_rx)
3378 {
3379         struct rte_eth_dev *dev;
3380
3381         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3382         dev = &rte_eth_devices[port_id];
3383
3384         if (is_rx && (queue_id >= dev->data->nb_rx_queues))
3385                 return -EINVAL;
3386
3387         if (!is_rx && (queue_id >= dev->data->nb_tx_queues))
3388                 return -EINVAL;
3389
3390         if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
3391                 return -EINVAL;
3392
3393         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
3394         return (*dev->dev_ops->queue_stats_mapping_set)(dev, queue_id, stat_idx, is_rx);
3395 }
3396
3397 int
3398 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
3399                 uint8_t stat_idx)
3400 {
3401         return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id,
3402                                                 tx_queue_id,
3403                                                 stat_idx, STAT_QMAP_TX));
3404 }
3405
3406 int
3407 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
3408                 uint8_t stat_idx)
3409 {
3410         return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id,
3411                                                 rx_queue_id,
3412                                                 stat_idx, STAT_QMAP_RX));
3413 }
3414
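/*
 * Editor's note: on NICs with fewer stat registers than queues, a queue
 * must first be mapped to one of the RTE_ETHDEV_QUEUE_STAT_CNTRS slots for
 * its per-queue counters to be meaningful. Queue and slot numbers below
 * are arbitrary examples:
 *
 *	if (rte_eth_dev_set_rx_queue_stats_mapping(port_id, 4, 0) == 0 &&
 *	    rte_eth_dev_set_tx_queue_stats_mapping(port_id, 4, 0) == 0)
 *		; // queue 4 now accumulates into per-queue stats slot 0
 */
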
3415 int
3416 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
3417 {
3418         struct rte_eth_dev *dev;
3419
3420         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3421         dev = &rte_eth_devices[port_id];
3422
3423         if (fw_version == NULL && fw_size > 0) {
3424                 RTE_ETHDEV_LOG(ERR,
3425                         "Cannot get ethdev port %u FW version to NULL when string size is non zero\n",
3426                         port_id);
3427                 return -EINVAL;
3428         }
3429
3430         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
3431         return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
3432                                                         fw_version, fw_size));
3433 }
3434
3435 int
3436 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
3437 {
3438         struct rte_eth_dev *dev;
3439         const struct rte_eth_desc_lim lim = {
3440                 .nb_max = UINT16_MAX,
3441                 .nb_min = 0,
3442                 .nb_align = 1,
3443                 .nb_seg_max = UINT16_MAX,
3444                 .nb_mtu_seg_max = UINT16_MAX,
3445         };
3446         int diag;
3447
3448         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3449         dev = &rte_eth_devices[port_id];
3450
3451         if (dev_info == NULL) {
3452                 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u info to NULL\n",
3453                         port_id);
3454                 return -EINVAL;
3455         }
3456
3457         /*
3458          * Zero dev_info up front so the caller never observes stale
3459          * fields, even if the driver callback below fails.
3460          */
3461         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3462         dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
3463
3464         dev_info->rx_desc_lim = lim;
3465         dev_info->tx_desc_lim = lim;
3466         dev_info->device = dev->device;
3467         dev_info->min_mtu = RTE_ETHER_MIN_LEN - RTE_ETHER_HDR_LEN -
3468                 RTE_ETHER_CRC_LEN;
3469         dev_info->max_mtu = UINT16_MAX;
3470
3471         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
3472         diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info);
3473         if (diag != 0) {
3474                 /* Cleanup already filled in device information */
3475                 memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3476                 return eth_err(port_id, diag);
3477         }
3478
3479         /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */
3480         dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues,
3481                         RTE_MAX_QUEUES_PER_PORT);
3482         dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues,
3483                         RTE_MAX_QUEUES_PER_PORT);
3484
3485         dev_info->driver_name = dev->device->driver->name;
3486         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
3487         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
3488
3489         dev_info->dev_flags = &dev->data->dev_flags;
3490
3491         return 0;
3492 }
3493
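/*
 * Editor's note: a typical hedged use of rte_eth_dev_info_get() above is
 * clamping the application's queue request to what the device supports:
 *
 *	struct rte_eth_dev_info di;
 *	uint16_t nb_rxq = 8;	// the application's wish, arbitrary here
 *
 *	if (rte_eth_dev_info_get(port_id, &di) == 0)
 *		nb_rxq = RTE_MIN(nb_rxq, di.max_rx_queues);
 */
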
3494 int
3495 rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf)
3496 {
3497         struct rte_eth_dev *dev;
3498
3499         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3500         dev = &rte_eth_devices[port_id];
3501
3502         if (dev_conf == NULL) {
3503                 RTE_ETHDEV_LOG(ERR,
3504                         "Cannot get ethdev port %u configuration to NULL\n",
3505                         port_id);
3506                 return -EINVAL;
3507         }
3508
3509         memcpy(dev_conf, &dev->data->dev_conf, sizeof(struct rte_eth_conf));
3510
3511         return 0;
3512 }
3513
3514 int
3515 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3516                                  uint32_t *ptypes, int num)
3517 {
3518         int i, j;
3519         struct rte_eth_dev *dev;
3520         const uint32_t *all_ptypes;
3521
3522         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3523         dev = &rte_eth_devices[port_id];
3524
3525         if (ptypes == NULL && num > 0) {
3526                 RTE_ETHDEV_LOG(ERR,
3527                         "Cannot get ethdev port %u supported packet types to NULL when array size is non zero\n",
3528                         port_id);
3529                 return -EINVAL;
3530         }
3531
3532         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
3533         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3534
3535         if (!all_ptypes)
3536                 return 0;
3537
3538         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
3539                 if (all_ptypes[i] & ptype_mask) {
3540                         if (j < num)
3541                                 ptypes[j] = all_ptypes[i];
3542                         j++;
3543                 }
3544
3545         return j;
3546 }
3547
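/*
 * Editor's note: a sketch of the size-query-then-fetch pattern for the
 * supported packet types above, restricted to L3 types via the mask:
 *
 *	int i, n = rte_eth_dev_get_supported_ptypes(port_id,
 *			RTE_PTYPE_L3_MASK, NULL, 0);
 *
 *	if (n > 0) {
 *		uint32_t *pt = malloc(n * sizeof(*pt));
 *
 *		if (pt != NULL) {
 *			n = rte_eth_dev_get_supported_ptypes(port_id,
 *					RTE_PTYPE_L3_MASK, pt, n);
 *			for (i = 0; i < n; i++)
 *				printf("ptype 0x%08x\n", pt[i]);
 *		}
 *		free(pt);
 *	}
 */
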
3548 int
3549 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3550                                  uint32_t *set_ptypes, unsigned int num)
3551 {
3552         const uint32_t valid_ptype_masks[] = {
3553                 RTE_PTYPE_L2_MASK,
3554                 RTE_PTYPE_L3_MASK,
3555                 RTE_PTYPE_L4_MASK,
3556                 RTE_PTYPE_TUNNEL_MASK,
3557                 RTE_PTYPE_INNER_L2_MASK,
3558                 RTE_PTYPE_INNER_L3_MASK,
3559                 RTE_PTYPE_INNER_L4_MASK,
3560         };
3561         const uint32_t *all_ptypes;
3562         struct rte_eth_dev *dev;
3563         uint32_t unused_mask;
3564         unsigned int i, j;
3565         int ret;
3566
3567         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3568         dev = &rte_eth_devices[port_id];
3569
3570         if (num > 0 && set_ptypes == NULL) {
3571                 RTE_ETHDEV_LOG(ERR,
3572                         "Cannot get ethdev port %u set packet types to NULL when array size is non zero\n",
3573                         port_id);
3574                 return -EINVAL;
3575         }
3576
3577         if (*dev->dev_ops->dev_supported_ptypes_get == NULL ||
3578                         *dev->dev_ops->dev_ptypes_set == NULL) {
3579                 ret = 0;
3580                 goto ptype_unknown;
3581         }
3582
3583         if (ptype_mask == 0) {
3584                 ret = (*dev->dev_ops->dev_ptypes_set)(dev,
3585                                 ptype_mask);
3586                 goto ptype_unknown;
3587         }
3588
3589         unused_mask = ptype_mask;
3590         for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) {
3591                 uint32_t mask = ptype_mask & valid_ptype_masks[i];
3592                 if (mask && mask != valid_ptype_masks[i]) {
3593                         ret = -EINVAL;
3594                         goto ptype_unknown;
3595                 }
3596                 unused_mask &= ~valid_ptype_masks[i];
3597         }
3598
3599         if (unused_mask) {
3600                 ret = -EINVAL;
3601                 goto ptype_unknown;
3602         }
3603
3604         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3605         if (all_ptypes == NULL) {
3606                 ret = 0;
3607                 goto ptype_unknown;
3608         }
3609
3610         /*
3611          * Accommodate as many set_ptypes as possible. If the supplied
3612          * set_ptypes array is too small, fill it partially.
3613          */
3614         for (i = 0, j = 0; set_ptypes != NULL &&
3615                                 (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) {
3616                 if (ptype_mask & all_ptypes[i]) {
3617                         if (j + 1 < num) {
3618                                 set_ptypes[j] = all_ptypes[i];
3619                                 j++;
3620                                 continue;
3621                         }
3622                         break;
3623                 }
3624         }
3625
3626         if (set_ptypes != NULL && j < num)
3627                 set_ptypes[j] = RTE_PTYPE_UNKNOWN;
3628
3629         return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask);
3630
3631 ptype_unknown:
3632         if (num > 0)
3633                 set_ptypes[0] = RTE_PTYPE_UNKNOWN;
3634
3635         return ret;
3636 }
3637
3638 int
3639 rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma,
3640         unsigned int num)
3641 {
3642         int32_t ret;
3643         struct rte_eth_dev *dev;
3644         struct rte_eth_dev_info dev_info;
3645
3646         if (ma == NULL) {
3647                 RTE_ETHDEV_LOG(ERR, "%s: invalid parameters\n", __func__);
3648                 return -EINVAL;
3649         }
3650
3651         /* will check for us that port_id is a valid one */
3652         ret = rte_eth_dev_info_get(port_id, &dev_info);
3653         if (ret != 0)
3654                 return ret;
3655
3656         dev = &rte_eth_devices[port_id];
3657         num = RTE_MIN(dev_info.max_mac_addrs, num);
3658         memcpy(ma, dev->data->mac_addrs, num * sizeof(ma[0]));
3659
3660         return num;
3661 }
3662
3663 int
3664 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
3665 {
3666         struct rte_eth_dev *dev;
3667
3668         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3669         dev = &rte_eth_devices[port_id];
3670
3671         if (mac_addr == NULL) {
3672                 RTE_ETHDEV_LOG(ERR,
3673                         "Cannot get ethdev port %u MAC address to NULL\n",
3674                         port_id);
3675                 return -EINVAL;
3676         }
3677
3678         rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
3679
3680         return 0;
3681 }
3682
3683 int
3684 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
3685 {
3686         struct rte_eth_dev *dev;
3687
3688         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3689         dev = &rte_eth_devices[port_id];
3690
3691         if (mtu == NULL) {
3692                 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u MTU to NULL\n",
3693                         port_id);
3694                 return -EINVAL;
3695         }
3696
3697         *mtu = dev->data->mtu;
3698         return 0;
3699 }
3700
3701 int
3702 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
3703 {
3704         int ret;
3705         struct rte_eth_dev_info dev_info;
3706         struct rte_eth_dev *dev;
3707
3708         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3709         dev = &rte_eth_devices[port_id];
3710         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
3711
3712         /*
3713          * Check if the device supports dev_infos_get, if it does not
3714          * skip min_mtu/max_mtu validation here as this requires values
3715          * that are populated within the call to rte_eth_dev_info_get()
3716          * which relies on dev->dev_ops->dev_infos_get.
3717          */
3718         if (*dev->dev_ops->dev_infos_get != NULL) {
3719                 ret = rte_eth_dev_info_get(port_id, &dev_info);
3720                 if (ret != 0)
3721                         return ret;
3722
3723                 ret = eth_dev_validate_mtu(port_id, &dev_info, mtu);
3724                 if (ret != 0)
3725                         return ret;
3726         }
3727
3728         if (dev->data->dev_configured == 0) {
3729                 RTE_ETHDEV_LOG(ERR,
3730                         "Port %u must be configured before MTU set\n",
3731                         port_id);
3732                 return -EINVAL;
3733         }
3734
3735         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
3736         if (ret == 0)
3737                 dev->data->mtu = mtu;
3738
3739         return eth_err(port_id, ret);
3740 }
3741
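/*
 * Editor's note: MTU may only be changed once the port is configured
 * (enforced above). A hedged sketch, assuming the device supports jumbo
 * frames up to the requested size:
 *
 *	// after rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf)
 *	if (rte_eth_dev_set_mtu(port_id, 9000) == 0) {
 *		uint16_t mtu;
 *
 *		if (rte_eth_dev_get_mtu(port_id, &mtu) == 0)
 *			printf("MTU now %u\n", mtu);
 *	}
 */
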
3742 int
3743 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
3744 {
3745         struct rte_eth_dev *dev;
3746         int ret;
3747
3748         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3749         dev = &rte_eth_devices[port_id];
3750
3751         if (!(dev->data->dev_conf.rxmode.offloads &
3752               RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) {
3753                 RTE_ETHDEV_LOG(ERR, "Port %u: VLAN-filtering disabled\n",
3754                         port_id);
3755                 return -ENOSYS;
3756         }
3757
3758         if (vlan_id > 4095) {
3759                 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n",
3760                         port_id, vlan_id);
3761                 return -EINVAL;
3762         }
3763         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
3764
3765         ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
3766         if (ret == 0) {
3767                 struct rte_vlan_filter_conf *vfc;
3768                 int vidx;
3769                 int vbit;
3770
3771                 vfc = &dev->data->vlan_filter_conf;
3772                 vidx = vlan_id / 64;
3773                 vbit = vlan_id % 64;
3774
3775                 if (on)
3776                         vfc->ids[vidx] |= RTE_BIT64(vbit);
3777                 else
3778                         vfc->ids[vidx] &= ~RTE_BIT64(vbit);
3779         }
3780
3781         return eth_err(port_id, ret);
3782 }
3783
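/*
 * Editor's note: rte_eth_dev_vlan_filter() above requires the
 * RTE_ETH_RX_OFFLOAD_VLAN_FILTER offload to have been enabled at
 * configure time. A sketch admitting VLAN 100 (values arbitrary):
 *
 *	// pre-configure: conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER
 *	if (rte_eth_dev_vlan_filter(port_id, 100, 1) == 0)
 *		; // frames tagged with VLAN 100 are now accepted
 */
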
3784 int
3785 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3786                                     int on)
3787 {
3788         struct rte_eth_dev *dev;
3789
3790         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3791         dev = &rte_eth_devices[port_id];
3792
3793         if (rx_queue_id >= dev->data->nb_rx_queues) {
3794                 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id);
3795                 return -EINVAL;
3796         }
3797
3798         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
3799         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
3800
3801         return 0;
3802 }
3803
3804 int
3805 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
3806                                 enum rte_vlan_type vlan_type,
3807                                 uint16_t tpid)
3808 {
3809         struct rte_eth_dev *dev;
3810
3811         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3812         dev = &rte_eth_devices[port_id];
3813
3814         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
3815         return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
3816                                                                tpid));
3817 }
3818
3819 int
3820 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
3821 {
3822         struct rte_eth_dev_info dev_info;
3823         struct rte_eth_dev *dev;
3824         int ret = 0;
3825         int mask = 0;
3826         int cur, org = 0;
3827         uint64_t orig_offloads;
3828         uint64_t dev_offloads;
3829         uint64_t new_offloads;
3830
3831         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3832         dev = &rte_eth_devices[port_id];
3833
3834         /* save original values in case of failure */
3835         orig_offloads = dev->data->dev_conf.rxmode.offloads;
3836         dev_offloads = orig_offloads;
3837
3838         /* check which option changed by application */
3839         cur = !!(offload_mask & RTE_ETH_VLAN_STRIP_OFFLOAD);
3840         org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
3841         if (cur != org) {
3842                 if (cur)
3843                         dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
3844                 else
3845                         dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
3846                 mask |= RTE_ETH_VLAN_STRIP_MASK;
3847         }
3848
3849         cur = !!(offload_mask & RTE_ETH_VLAN_FILTER_OFFLOAD);
3850         org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER);
3851         if (cur != org) {
3852                 if (cur)
3853                         dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
3854                 else
3855                         dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
3856                 mask |= RTE_ETH_VLAN_FILTER_MASK;
3857         }
3858
3859         cur = !!(offload_mask & RTE_ETH_VLAN_EXTEND_OFFLOAD);
3860         org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND);
3861         if (cur != org) {
3862                 if (cur)
3863                         dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
3864                 else
3865                         dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
3866                 mask |= RTE_ETH_VLAN_EXTEND_MASK;
3867         }
3868
3869         cur = !!(offload_mask & RTE_ETH_QINQ_STRIP_OFFLOAD);
3870         org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP);
3871         if (cur != org) {
3872                 if (cur)
3873                         dev_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
3874                 else
3875                         dev_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
3876                 mask |= RTE_ETH_QINQ_STRIP_MASK;
3877         }
3878
3879         /* no change */
3880         if (mask == 0)
3881                 return ret;
3882
3883         ret = rte_eth_dev_info_get(port_id, &dev_info);
3884         if (ret != 0)
3885                 return ret;
3886
3887         /* Rx VLAN offloading must be within its device capabilities */
3888         if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) {
3889                 new_offloads = dev_offloads & ~orig_offloads;
3890                 RTE_ETHDEV_LOG(ERR,
3891                         "Ethdev port_id=%u newly requested VLAN offloads "
3892                         "0x%" PRIx64 " must be within Rx offload capabilities "
3893                         "0x%" PRIx64 " in %s()\n",
3894                         port_id, new_offloads, dev_info.rx_offload_capa,
3895                         __func__);
3896                 return -EINVAL;
3897         }
3898
3899         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
3900         dev->data->dev_conf.rxmode.offloads = dev_offloads;
3901         ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
3902         if (ret) {
3903                 /* hit an error, restore the original values */
3904                 dev->data->dev_conf.rxmode.offloads = orig_offloads;
3905         }
3906
3907         return eth_err(port_id, ret);
3908 }
3909
3910 int
3911 rte_eth_dev_get_vlan_offload(uint16_t port_id)
3912 {
3913         struct rte_eth_dev *dev;
3914         uint64_t *dev_offloads;
3915         int ret = 0;
3916
3917         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3918         dev = &rte_eth_devices[port_id];
3919         dev_offloads = &dev->data->dev_conf.rxmode.offloads;
3920
3921         if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
3922                 ret |= RTE_ETH_VLAN_STRIP_OFFLOAD;
3923
3924         if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
3925                 ret |= RTE_ETH_VLAN_FILTER_OFFLOAD;
3926
3927         if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
3928                 ret |= RTE_ETH_VLAN_EXTEND_OFFLOAD;
3929
3930         if (*dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
3931                 ret |= RTE_ETH_QINQ_STRIP_OFFLOAD;
3932
3933         return ret;
3934 }
3935
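/*
 * Editor's note: the set/get pair above operates on a bitmask of
 * RTE_ETH_*_OFFLOAD flags. A sketch enabling VLAN stripping on top of the
 * current setting:
 *
 *	int mask = rte_eth_dev_get_vlan_offload(port_id);
 *
 *	if (mask >= 0)
 *		(void)rte_eth_dev_set_vlan_offload(port_id,
 *				mask | RTE_ETH_VLAN_STRIP_OFFLOAD);
 */
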
3936 int
3937 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
3938 {
3939         struct rte_eth_dev *dev;
3940
3941         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3942         dev = &rte_eth_devices[port_id];
3943
3944         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
3945         return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
3946 }
3947
3948 int
3949 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3950 {
3951         struct rte_eth_dev *dev;
3952
3953         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3954         dev = &rte_eth_devices[port_id];
3955
3956         if (fc_conf == NULL) {
3957                 RTE_ETHDEV_LOG(ERR,
3958                         "Cannot get ethdev port %u flow control config to NULL\n",
3959                         port_id);
3960                 return -EINVAL;
3961         }
3962
3963         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
3964         memset(fc_conf, 0, sizeof(*fc_conf));
3965         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
3966 }
3967
3968 int
3969 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3970 {
3971         struct rte_eth_dev *dev;
3972
3973         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3974         dev = &rte_eth_devices[port_id];
3975
3976         if (fc_conf == NULL) {
3977                 RTE_ETHDEV_LOG(ERR,
3978                         "Cannot set ethdev port %u flow control from NULL config\n",
3979                         port_id);
3980                 return -EINVAL;
3981         }
3982
3983         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
3984                 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n");
3985                 return -EINVAL;
3986         }
3987
3988         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
3989         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
3990 }
3991
3992 int
3993 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
3994                                    struct rte_eth_pfc_conf *pfc_conf)
3995 {
3996         struct rte_eth_dev *dev;
3997
3998         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3999         dev = &rte_eth_devices[port_id];
4000
4001         if (pfc_conf == NULL) {
4002                 RTE_ETHDEV_LOG(ERR,
4003                         "Cannot set ethdev port %u priority flow control from NULL config\n",
4004                         port_id);
4005                 return -EINVAL;
4006         }
4007
4008         if (pfc_conf->priority > (RTE_ETH_DCB_NUM_USER_PRIORITIES - 1)) {
4009                 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
4010                 return -EINVAL;
4011         }
4012
4013         /* High water, low water validation are device specific */
4014         if (*dev->dev_ops->priority_flow_ctrl_set)
4015                 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
4016                                         (dev, pfc_conf));
4017         return -ENOTSUP;
4018 }
4019
4020 static int
4021 eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
4022                         uint16_t reta_size)
4023 {
4024         uint16_t i, num;
4025
4026         num = (reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) / RTE_ETH_RETA_GROUP_SIZE;
4027         for (i = 0; i < num; i++) {
4028                 if (reta_conf[i].mask)
4029                         return 0;
4030         }
4031
4032         return -EINVAL;
4033 }
4034
4035 static int
4036 eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
4037                          uint16_t reta_size,
4038                          uint16_t max_rxq)
4039 {
4040         uint16_t i, idx, shift;
4041
4042         if (max_rxq == 0) {
4043                 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n");
4044                 return -EINVAL;
4045         }
4046
4047         for (i = 0; i < reta_size; i++) {
4048                 idx = i / RTE_ETH_RETA_GROUP_SIZE;
4049                 shift = i % RTE_ETH_RETA_GROUP_SIZE;
4050                 if ((reta_conf[idx].mask & RTE_BIT64(shift)) &&
4051                         (reta_conf[idx].reta[shift] >= max_rxq)) {
4052                         RTE_ETHDEV_LOG(ERR,
4053                                 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n",
4054                                 idx, shift,
4055                                 reta_conf[idx].reta[shift], max_rxq);
4056                         return -EINVAL;
4057                 }
4058         }
4059
4060         return 0;
4061 }
4062
4063 int
4064 rte_eth_dev_rss_reta_update(uint16_t port_id,
4065                             struct rte_eth_rss_reta_entry64 *reta_conf,
4066                             uint16_t reta_size)
4067 {
4068         struct rte_eth_dev *dev;
4069         int ret;
4070
4071         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4072         dev = &rte_eth_devices[port_id];
4073
4074         if (reta_conf == NULL) {
4075                 RTE_ETHDEV_LOG(ERR,
4076                         "Cannot update ethdev port %u RSS RETA to NULL\n",
4077                         port_id);
4078                 return -EINVAL;
4079         }
4080
4081         if (reta_size == 0) {
4082                 RTE_ETHDEV_LOG(ERR,
4083                         "Cannot update ethdev port %u RSS RETA with zero size\n",
4084                         port_id);
4085                 return -EINVAL;
4086         }
4087
4088         /* Check mask bits */
4089         ret = eth_check_reta_mask(reta_conf, reta_size);
4090         if (ret < 0)
4091                 return ret;
4092
4093         /* Check entry value */
4094         ret = eth_check_reta_entry(reta_conf, reta_size,
4095                                 dev->data->nb_rx_queues);
4096         if (ret < 0)
4097                 return ret;
4098
4099         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
4100         return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
4101                                                              reta_size));
4102 }
4103
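/*
 * Editor's note: a hedged sketch spreading a redirection table over nb_q
 * queues round-robin; each rte_eth_rss_reta_entry64 carries
 * RTE_ETH_RETA_GROUP_SIZE entries selected by its mask bits, and reta_size
 * is assumed to be a multiple of that group size:
 *
 *	struct rte_eth_rss_reta_entry64 reta[reta_size / RTE_ETH_RETA_GROUP_SIZE];
 *	uint16_t i;
 *
 *	memset(reta, 0, sizeof(reta));
 *	for (i = 0; i < reta_size; i++) {
 *		reta[i / RTE_ETH_RETA_GROUP_SIZE].mask |=
 *				RTE_BIT64(i % RTE_ETH_RETA_GROUP_SIZE);
 *		reta[i / RTE_ETH_RETA_GROUP_SIZE].reta[i % RTE_ETH_RETA_GROUP_SIZE] =
 *				i % nb_q;
 *	}
 *	(void)rte_eth_dev_rss_reta_update(port_id, reta, reta_size);
 */
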
4104 int
4105 rte_eth_dev_rss_reta_query(uint16_t port_id,
4106                            struct rte_eth_rss_reta_entry64 *reta_conf,
4107                            uint16_t reta_size)
4108 {
4109         struct rte_eth_dev *dev;
4110         int ret;
4111
4112         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4113         dev = &rte_eth_devices[port_id];
4114
4115         if (reta_conf == NULL) {
4116                 RTE_ETHDEV_LOG(ERR,
4117                         "Cannot query ethdev port %u RSS RETA from NULL config\n",
4118                         port_id);
4119                 return -EINVAL;
4120         }
4121
4122         /* Check mask bits */
4123         ret = eth_check_reta_mask(reta_conf, reta_size);
4124         if (ret < 0)
4125                 return ret;
4126
4127         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
4128         return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
4129                                                             reta_size));
4130 }
4131
4132 int
4133 rte_eth_dev_rss_hash_update(uint16_t port_id,
4134                             struct rte_eth_rss_conf *rss_conf)
4135 {
4136         struct rte_eth_dev *dev;
4137         struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
4138         int ret;
4139
4140         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4141         dev = &rte_eth_devices[port_id];
4142
4143         if (rss_conf == NULL) {
4144                 RTE_ETHDEV_LOG(ERR,
4145                         "Cannot update ethdev port %u RSS hash from NULL config\n",
4146                         port_id);
4147                 return -EINVAL;
4148         }
4149
4150         ret = rte_eth_dev_info_get(port_id, &dev_info);
4151         if (ret != 0)
4152                 return ret;
4153
4154         rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf);
4155         if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
4156             dev_info.flow_type_rss_offloads) {
4157                 RTE_ETHDEV_LOG(ERR,
4158                         "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
4159                         port_id, rss_conf->rss_hf,
4160                         dev_info.flow_type_rss_offloads);
4161                 return -EINVAL;
4162         }
4163         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
4164         return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
4165                                                                  rss_conf));
4166 }
4167
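/*
 * Editor's note: a sketch narrowing the RSS hash to IPv4 TCP/UDP; the
 * requested rss_hf must stay within dev_info.flow_type_rss_offloads, as
 * validated above:
 *
 *	struct rte_eth_rss_conf rc = {
 *		.rss_key = NULL,	// keep the current hash key
 *		.rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_TCP |
 *			  RTE_ETH_RSS_NONFRAG_IPV4_UDP,
 *	};
 *
 *	(void)rte_eth_dev_rss_hash_update(port_id, &rc);
 */
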
4168 int
4169 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
4170                               struct rte_eth_rss_conf *rss_conf)
4171 {
4172         struct rte_eth_dev *dev;
4173
4174         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4175         dev = &rte_eth_devices[port_id];
4176
4177         if (rss_conf == NULL) {
4178                 RTE_ETHDEV_LOG(ERR,
4179                         "Cannot get ethdev port %u RSS hash config to NULL\n",
4180                         port_id);
4181                 return -EINVAL;
4182         }
4183
4184         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
4185         return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
4186                                                                    rss_conf));
4187 }
4188
4189 int
4190 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
4191                                 struct rte_eth_udp_tunnel *udp_tunnel)
4192 {
4193         struct rte_eth_dev *dev;
4194
4195         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4196         dev = &rte_eth_devices[port_id];
4197
4198         if (udp_tunnel == NULL) {
4199                 RTE_ETHDEV_LOG(ERR,
4200                         "Cannot add ethdev port %u UDP tunnel port from NULL UDP tunnel\n",
4201                         port_id);
4202                 return -EINVAL;
4203         }
4204
4205         if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) {
4206                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4207                 return -EINVAL;
4208         }
4209
4210         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
4211         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
4212                                                                 udp_tunnel));
4213 }
4214
4215 int
4216 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
4217                                    struct rte_eth_udp_tunnel *udp_tunnel)
4218 {
4219         struct rte_eth_dev *dev;
4220
4221         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4222         dev = &rte_eth_devices[port_id];
4223
4224         if (udp_tunnel == NULL) {
4225                 RTE_ETHDEV_LOG(ERR,
4226                         "Cannot delete ethdev port %u UDP tunnel port from NULL UDP tunnel\n",
4227                         port_id);
4228                 return -EINVAL;
4229         }
4230
4231         if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) {
4232                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4233                 return -EINVAL;
4234         }
4235
4236         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
4237         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
4238                                                                 udp_tunnel));
4239 }
4240
4241 int
4242 rte_eth_led_on(uint16_t port_id)
4243 {
4244         struct rte_eth_dev *dev;
4245
4246         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4247         dev = &rte_eth_devices[port_id];
4248
4249         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
4250         return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
4251 }
4252
4253 int
4254 rte_eth_led_off(uint16_t port_id)
4255 {
4256         struct rte_eth_dev *dev;
4257
4258         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4259         dev = &rte_eth_devices[port_id];
4260
4261         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
4262         return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
4263 }
4264
4265 int
4266 rte_eth_fec_get_capability(uint16_t port_id,
4267                            struct rte_eth_fec_capa *speed_fec_capa,
4268                            unsigned int num)
4269 {
4270         struct rte_eth_dev *dev;
4271         int ret;
4272
4273         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4274         dev = &rte_eth_devices[port_id];
4275
4276         if (speed_fec_capa == NULL && num > 0) {
4277                 RTE_ETHDEV_LOG(ERR,
4278                         "Cannot get ethdev port %u FEC capability to NULL when array size is non zero\n",
4279                         port_id);
4280                 return -EINVAL;
4281         }
4282
4283         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get_capability, -ENOTSUP);
4284         ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num);
4285
4286         return ret;
4287 }
4288
4289 int
4290 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa)
4291 {
4292         struct rte_eth_dev *dev;
4293
4294         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4295         dev = &rte_eth_devices[port_id];
4296
4297         if (fec_capa == NULL) {
4298                 RTE_ETHDEV_LOG(ERR,
4299                         "Cannot get ethdev port %u current FEC mode to NULL\n",
4300                         port_id);
4301                 return -EINVAL;
4302         }
4303
4304         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get, -ENOTSUP);
4305         return eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa));
4306 }
4307
4308 int
4309 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa)
4310 {
4311         struct rte_eth_dev *dev;
4312
4313         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4314         dev = &rte_eth_devices[port_id];
4315
4316         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_set, -ENOTSUP);
4317         return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa));
4318 }
4319
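/*
 * Editor's note: a hedged FEC sketch; mode capability flags use the
 * RTE_ETH_FEC_MODE_CAPA_MASK(x) encoding from rte_ethdev.h:
 *
 *	uint32_t fec;
 *
 *	if (rte_eth_fec_get(port_id, &fec) == 0 &&
 *	    (fec & RTE_ETH_FEC_MODE_CAPA_MASK(AUTO)) == 0)
 *		(void)rte_eth_fec_set(port_id,
 *				RTE_ETH_FEC_MODE_CAPA_MASK(AUTO));
 */
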
4320 /*
4321  * Return the index of addr in the MAC address array, or -1 if not found.
4322  * Passing 00:00:00:00:00:00 finds the first empty slot.
4323  */
4324 static int
4325 eth_dev_get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
4326 {
4327         struct rte_eth_dev_info dev_info;
4328         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4329         unsigned i;
4330         int ret;
4331
4332         ret = rte_eth_dev_info_get(port_id, &dev_info);
4333         if (ret != 0)
4334                 return -1;
4335
4336         for (i = 0; i < dev_info.max_mac_addrs; i++)
4337                 if (memcmp(addr, &dev->data->mac_addrs[i],
4338                                 RTE_ETHER_ADDR_LEN) == 0)
4339                         return i;
4340
4341         return -1;
4342 }
4343
4344 static const struct rte_ether_addr null_mac_addr;
4345
4346 int
4347 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
4348                         uint32_t pool)
4349 {
4350         struct rte_eth_dev *dev;
4351         int index;
4352         uint64_t pool_mask;
4353         int ret;
4354
4355         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4356         dev = &rte_eth_devices[port_id];
4357
4358         if (addr == NULL) {
4359                 RTE_ETHDEV_LOG(ERR,
4360                         "Cannot add ethdev port %u MAC address from NULL address\n",
4361                         port_id);
4362                 return -EINVAL;
4363         }
4364
4365         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
4366
4367         if (rte_is_zero_ether_addr(addr)) {
4368                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
4369                         port_id);
4370                 return -EINVAL;
4371         }
4372         if (pool >= RTE_ETH_64_POOLS) {
4373                 RTE_ETHDEV_LOG(ERR, "Pool ID must be 0-%d\n", RTE_ETH_64_POOLS - 1);
4374                 return -EINVAL;
4375         }
4376
4377         index = eth_dev_get_mac_addr_index(port_id, addr);
4378         if (index < 0) {
4379                 index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr);
4380                 if (index < 0) {
4381                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
4382                                 port_id);
4383                         return -ENOSPC;
4384                 }
4385         } else {
4386                 pool_mask = dev->data->mac_pool_sel[index];
4387
4388                 /* If both the MAC address and pool are already there, do nothing */
4389                 if (pool_mask & RTE_BIT64(pool))
4390                         return 0;
4391         }
4392
4393         /* Update NIC */
4394         ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
4395
4396         if (ret == 0) {
4397                 /* Update address in NIC data structure */
4398                 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]);
4399
4400                 /* Update pool bitmap in NIC data structure */
4401                 dev->data->mac_pool_sel[index] |= RTE_BIT64(pool);
4402         }
4403
4404         return eth_err(port_id, ret);
4405 }
4406
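/*
 * Editor's note: an add/remove sketch for the secondary MAC address API
 * above; the locally administered address bytes are arbitrary examples:
 *
 *	struct rte_ether_addr ma = {
 *		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 } };
 *
 *	if (rte_eth_dev_mac_addr_add(port_id, &ma, 0) == 0)
 *		(void)rte_eth_dev_mac_addr_remove(port_id, &ma);
 */
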
4407 int
4408 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr)
4409 {
4410         struct rte_eth_dev *dev;
4411         int index;
4412
4413         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4414         dev = &rte_eth_devices[port_id];
4415
4416         if (addr == NULL) {
4417                 RTE_ETHDEV_LOG(ERR,
4418                         "Cannot remove ethdev port %u MAC address from NULL address\n",
4419                         port_id);
4420                 return -EINVAL;
4421         }
4422
4423         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
4424
4425         index = eth_dev_get_mac_addr_index(port_id, addr);
4426         if (index == 0) {
4427                 RTE_ETHDEV_LOG(ERR,
4428                         "Port %u: Cannot remove default MAC address\n",
4429                         port_id);
4430                 return -EADDRINUSE;
4431         } else if (index < 0)
4432                 return 0;  /* Do nothing if address wasn't found */
4433
4434         /* Update NIC */
4435         (*dev->dev_ops->mac_addr_remove)(dev, index);
4436
4437         /* Update address in NIC data structure */
4438         rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
4439
4440         /* reset pool bitmap */
4441         dev->data->mac_pool_sel[index] = 0;
4442
4443         return 0;
4444 }
4445
4446 int
4447 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr)
4448 {
4449         struct rte_eth_dev *dev;
4450         int ret;
4451
4452         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4453         dev = &rte_eth_devices[port_id];
4454
4455         if (addr == NULL) {
4456                 RTE_ETHDEV_LOG(ERR,
4457                         "Cannot set ethdev port %u default MAC address from NULL address\n",
4458                         port_id);
4459                 return -EINVAL;
4460         }
4461
4462         if (!rte_is_valid_assigned_ether_addr(addr))
4463                 return -EINVAL;
4464
4465         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
4466
4467         ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
4468         if (ret < 0)
4469                 return ret;
4470
4471         /* Update default address in NIC data structure */
4472         rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
4473
4474         return 0;
4475 }
4476
4477
4478 /*
4479  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
4480  * an empty spot.
4481  */
4482 static int
4483 eth_dev_get_hash_mac_addr_index(uint16_t port_id,
4484                 const struct rte_ether_addr *addr)
4485 {
4486         struct rte_eth_dev_info dev_info;
4487         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4488         unsigned int i;
4489         int ret;
4490
4491         ret = rte_eth_dev_info_get(port_id, &dev_info);
4492         if (ret != 0)
4493                 return -1;
4494
4495         if (!dev->data->hash_mac_addrs)
4496                 return -1;
4497
4498         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
4499                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
4500                         RTE_ETHER_ADDR_LEN) == 0)
4501                         return i;
4502
4503         return -1;
4504 }
4505
4506 int
4507 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
4508                                 uint8_t on)
4509 {
4510         int index;
4511         int ret;
4512         struct rte_eth_dev *dev;
4513
4514         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4515         dev = &rte_eth_devices[port_id];
4516
4517         if (addr == NULL) {
4518                 RTE_ETHDEV_LOG(ERR,
4519                         "Cannot set ethdev port %u unicast hash table from NULL address\n",
4520                         port_id);
4521                 return -EINVAL;
4522         }
4523
4524         if (rte_is_zero_ether_addr(addr)) {
4525                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add all-zero MAC address\n",
4526                         port_id);
4527                 return -EINVAL;
4528         }
4529
4530         index = eth_dev_get_hash_mac_addr_index(port_id, addr);
4531         /* Check if it's already there, and do nothing */
4532         if ((index >= 0) && on)
4533                 return 0;
4534
4535         if (index < 0) {
4536                 if (!on) {
4537                         RTE_ETHDEV_LOG(ERR,
4538                                 "Port %u: the MAC address was not set in the unicast hash table (UTA)\n",
4539                                 port_id);
4540                         return -EINVAL;
4541                 }
4542
4543                 index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr);
4544                 if (index < 0) {
4545                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
4546                                 port_id);
4547                         return -ENOSPC;
4548                 }
4549         }
4550
4551         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
4552         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
4553         if (ret == 0) {
4554                 /* Update address in NIC data structure */
4555                 if (on)
4556                         rte_ether_addr_copy(addr,
4557                                         &dev->data->hash_mac_addrs[index]);
4558                 else
4559                         rte_ether_addr_copy(&null_mac_addr,
4560                                         &dev->data->hash_mac_addrs[index]);
4561         }
4562
4563         return eth_err(port_id, ret);
4564 }
4565
4566 int
4567 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
4568 {
4569         struct rte_eth_dev *dev;
4570
4571         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4572         dev = &rte_eth_devices[port_id];
4573
4574         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
4575         return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
4576                                                                        on));
4577 }
4578
4579 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
4580                                         uint16_t tx_rate)
4581 {
4582         struct rte_eth_dev *dev;
4583         struct rte_eth_dev_info dev_info;
4584         struct rte_eth_link link;
4585         int ret;
4586
4587         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4588         dev = &rte_eth_devices[port_id];
4589
4590         ret = rte_eth_dev_info_get(port_id, &dev_info);
4591         if (ret != 0)
4592                 return ret;
4593
4594         link = dev->data->dev_link;
4595
4596         if (queue_idx >= dev_info.max_tx_queues) {
4597                 RTE_ETHDEV_LOG(ERR,
4598                         "Set queue rate limit: port %u: invalid queue ID=%u\n",
4599                         port_id, queue_idx);
4600                 return -EINVAL;
4601         }
4602
4603         if (tx_rate > link.link_speed) {
4604                 RTE_ETHDEV_LOG(ERR,
4605                         "Set queue rate limit: invalid tx_rate=%u, bigger than link speed %u\n",
4606                         tx_rate, link.link_speed);
4607                 return -EINVAL;
4608         }
4609
4610         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
4611         return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
4612                                                         queue_idx, tx_rate));
4613 }
4614
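/*
 * Usage sketch (illustrative, not part of the library): cap Tx queue 0
 * of an example port to 1000 Mbps.  tx_rate is expressed in Mbps and
 * must not exceed the current link speed.
 *
 *	int ret = rte_eth_set_queue_rate_limit(port_id, 0, 1000);
 *
 *	if (ret == -ENOTSUP)
 *		;	// driver has no set_queue_rate_limit op
 *	else if (ret == -EINVAL)
 *		;	// bad queue ID, or rate above the link speed
 */
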
4615 RTE_INIT(eth_dev_init_fp_ops)
4616 {
4617         uint32_t i;
4618
4619         for (i = 0; i != RTE_DIM(rte_eth_fp_ops); i++)
4620                 eth_dev_fp_ops_reset(rte_eth_fp_ops + i);
4621 }
4622
4623 RTE_INIT(eth_dev_init_cb_lists)
4624 {
4625         uint16_t i;
4626
4627         for (i = 0; i < RTE_MAX_ETHPORTS; i++)
4628                 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
4629 }
4630
4631 int
4632 rte_eth_dev_callback_register(uint16_t port_id,
4633                         enum rte_eth_event_type event,
4634                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4635 {
4636         struct rte_eth_dev *dev;
4637         struct rte_eth_dev_callback *user_cb;
4638         uint16_t next_port;
4639         uint16_t last_port;
4640
4641         if (cb_fn == NULL) {
4642                 RTE_ETHDEV_LOG(ERR,
4643                         "Cannot register ethdev port %u callback from NULL\n",
4644                         port_id);
4645                 return -EINVAL;
4646         }
4647
4648         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4649                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id);
4650                 return -EINVAL;
4651         }
4652
4653         if (port_id == RTE_ETH_ALL) {
4654                 next_port = 0;
4655                 last_port = RTE_MAX_ETHPORTS - 1;
4656         } else {
4657                 next_port = last_port = port_id;
4658         }
4659
4660         rte_spinlock_lock(&eth_dev_cb_lock);
4661
4662         do {
4663                 dev = &rte_eth_devices[next_port];
4664
4665                 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
4666                         if (user_cb->cb_fn == cb_fn &&
4667                                 user_cb->cb_arg == cb_arg &&
4668                                 user_cb->event == event) {
4669                                 break;
4670                         }
4671                 }
4672
4673                 /* create a new callback. */
4674                 if (user_cb == NULL) {
4675                         user_cb = rte_zmalloc("INTR_USER_CALLBACK",
4676                                 sizeof(struct rte_eth_dev_callback), 0);
4677                         if (user_cb != NULL) {
4678                                 user_cb->cb_fn = cb_fn;
4679                                 user_cb->cb_arg = cb_arg;
4680                                 user_cb->event = event;
4681                                 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
4682                                                   user_cb, next);
4683                         } else {
4684                                 rte_spinlock_unlock(&eth_dev_cb_lock);
4685                                 rte_eth_dev_callback_unregister(port_id, event,
4686                                                                 cb_fn, cb_arg);
4687                                 return -ENOMEM;
4688                         }
4689
4690                 }
4691         } while (++next_port <= last_port);
4692
4693         rte_spinlock_unlock(&eth_dev_cb_lock);
4694         return 0;
4695 }
4696
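/*
 * Usage sketch (illustrative, not part of the library): a link-status
 * callback registered on every port at once via RTE_ETH_ALL.  The
 * signature follows rte_eth_dev_cb_fn; the body is a made-up example.
 *
 *	static int
 *	lsc_event_cb(uint16_t port_id, enum rte_eth_event_type event,
 *		     void *cb_arg, void *ret_param)
 *	{
 *		RTE_SET_USED(cb_arg);
 *		RTE_SET_USED(ret_param);
 *		printf("port %u: event %d\n", port_id, (int)event);
 *		return 0;
 *	}
 *
 *	rte_eth_dev_callback_register(RTE_ETH_ALL, RTE_ETH_EVENT_INTR_LSC,
 *				      lsc_event_cb, NULL);
 */
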
4697 int
4698 rte_eth_dev_callback_unregister(uint16_t port_id,
4699                         enum rte_eth_event_type event,
4700                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4701 {
4702         int ret;
4703         struct rte_eth_dev *dev;
4704         struct rte_eth_dev_callback *cb, *next;
4705         uint16_t next_port;
4706         uint16_t last_port;
4707
4708         if (cb_fn == NULL) {
4709                 RTE_ETHDEV_LOG(ERR,
4710                         "Cannot unregister ethdev port %u callback from NULL\n",
4711                         port_id);
4712                 return -EINVAL;
4713         }
4714
4715         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4716                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id);
4717                 return -EINVAL;
4718         }
4719
4720         if (port_id == RTE_ETH_ALL) {
4721                 next_port = 0;
4722                 last_port = RTE_MAX_ETHPORTS - 1;
4723         } else {
4724                 next_port = last_port = port_id;
4725         }
4726
4727         rte_spinlock_lock(&eth_dev_cb_lock);
4728
4729         do {
4730                 dev = &rte_eth_devices[next_port];
4731                 ret = 0;
4732                 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
4733                      cb = next) {
4735                         next = TAILQ_NEXT(cb, next);
4736
4737                         if (cb->cb_fn != cb_fn || cb->event != event ||
4738                             (cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
4739                                 continue;
4740
4741                         /*
4742                          * if this callback is not executing right now,
4743                          * then remove it.
4744                          */
4745                         if (cb->active == 0) {
4746                                 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
4747                                 rte_free(cb);
4748                         } else {
4749                                 ret = -EAGAIN;
4750                         }
4751                 }
4752         } while (++next_port <= last_port);
4753
4754         rte_spinlock_unlock(&eth_dev_cb_lock);
4755         return ret;
4756 }
4757
4758 int
4759 rte_eth_dev_callback_process(struct rte_eth_dev *dev,
4760         enum rte_eth_event_type event, void *ret_param)
4761 {
4762         struct rte_eth_dev_callback *cb_lst;
4763         struct rte_eth_dev_callback dev_cb;
4764         int rc = 0;
4765
4766         rte_spinlock_lock(&eth_dev_cb_lock);
4767         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
4768                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
4769                         continue;
4770                 dev_cb = *cb_lst;
4771                 cb_lst->active = 1;
4772                 if (ret_param != NULL)
4773                         dev_cb.ret_param = ret_param;
4774
4775                 rte_spinlock_unlock(&eth_dev_cb_lock);
4776                 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
4777                                 dev_cb.cb_arg, dev_cb.ret_param);
4778                 rte_spinlock_lock(&eth_dev_cb_lock);
4779                 cb_lst->active = 0;
4780         }
4781         rte_spinlock_unlock(&eth_dev_cb_lock);
4782         return rc;
4783 }
4784
4785 void
4786 rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
4787 {
4788         if (dev == NULL)
4789                 return;
4790
4791         /*
4792          * For a secondary process, the device is expected to be already
4793          * 'usable' at this point, so shared data and all function pointers
4794          * for fast-path devops must have been set up properly inside rte_eth_dev.
4795          */
4796         if (rte_eal_process_type() == RTE_PROC_SECONDARY)
4797                 eth_dev_fp_ops_setup(rte_eth_fp_ops + dev->data->port_id, dev);
4798
4799         rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);
4800
4801         dev->state = RTE_ETH_DEV_ATTACHED;
4802 }
4803
4804 int
4805 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
4806 {
4807         uint32_t vec;
4808         struct rte_eth_dev *dev;
4809         struct rte_intr_handle *intr_handle;
4810         uint16_t qid;
4811         int rc;
4812
4813         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4814         dev = &rte_eth_devices[port_id];
4815
4816         if (!dev->intr_handle) {
4817                 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n");
4818                 return -ENOTSUP;
4819         }
4820
4821         intr_handle = dev->intr_handle;
4822         if (!intr_handle->intr_vec) {
4823                 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n");
4824                 return -EPERM;
4825         }
4826
4827         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
4828                 vec = intr_handle->intr_vec[qid];
4829                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4830                 if (rc && rc != -EEXIST) {
4831                         RTE_ETHDEV_LOG(ERR,
4832                                 "port %u queue %u: Rx ctl error op %d epfd %d vec %u\n",
4833                                 port_id, qid, op, epfd, vec);
4834                 }
4835         }
4836
4837         return 0;
4838 }
4839
4840 int
4841 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
4842 {
4843         struct rte_intr_handle *intr_handle;
4844         struct rte_eth_dev *dev;
4845         unsigned int efd_idx;
4846         uint32_t vec;
4847         int fd;
4848
4849         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
4850         dev = &rte_eth_devices[port_id];
4851
4852         if (queue_id >= dev->data->nb_rx_queues) {
4853                 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
4854                 return -1;
4855         }
4856
4857         if (!dev->intr_handle) {
4858                 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n");
4859                 return -1;
4860         }
4861
4862         intr_handle = dev->intr_handle;
4863         if (!intr_handle->intr_vec) {
4864                 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n");
4865                 return -1;
4866         }
4867
4868         vec = intr_handle->intr_vec[queue_id];
4869         efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
4870                 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
4871         fd = intr_handle->efds[efd_idx];
4872
4873         return fd;
4874 }
4875
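/*
 * Usage sketch (illustrative, not part of the library): fetch the event
 * fd backing Rx queue 0 of an example port and watch it with plain Linux
 * epoll.  Requires <sys/epoll.h>; error handling is elided.
 *
 *	int fd = rte_eth_dev_rx_intr_ctl_q_get_fd(port_id, 0);
 *	int epfd = epoll_create1(0);
 *	struct epoll_event ev = { .events = EPOLLIN };
 *
 *	if (fd >= 0 && epfd >= 0) {
 *		epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev);
 *		rte_eth_dev_rx_intr_enable(port_id, 0);
 *		epoll_wait(epfd, &ev, 1, -1);	// block until the queue interrupt fires
 *		rte_eth_dev_rx_intr_disable(port_id, 0);
 *	}
 */
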
4876 static inline int
4877 eth_dev_dma_mzone_name(char *name, size_t len, uint16_t port_id, uint16_t queue_id,
4878                 const char *ring_name)
4879 {
4880         return snprintf(name, len, "eth_p%d_q%d_%s",
4881                         port_id, queue_id, ring_name);
4882 }
4883
4884 const struct rte_memzone *
4885 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
4886                          uint16_t queue_id, size_t size, unsigned align,
4887                          int socket_id)
4888 {
4889         char z_name[RTE_MEMZONE_NAMESIZE];
4890         const struct rte_memzone *mz;
4891         int rc;
4892
4893         rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
4894                         queue_id, ring_name);
4895         if (rc >= RTE_MEMZONE_NAMESIZE) {
4896                 RTE_ETHDEV_LOG(ERR, "ring name too long\n");
4897                 rte_errno = ENAMETOOLONG;
4898                 return NULL;
4899         }
4900
4901         mz = rte_memzone_lookup(z_name);
4902         if (mz) {
4903                 if ((socket_id != SOCKET_ID_ANY && socket_id != mz->socket_id) ||
4904                                 size > mz->len ||
4905                                 ((uintptr_t)mz->addr & (align - 1)) != 0) {
4906                         RTE_ETHDEV_LOG(ERR,
4907                                 "memzone %s does not satisfy the requested attributes\n",
4908                                 mz->name);
4909                         return NULL;
4910                 }
4911
4912                 return mz;
4913         }
4914
4915         return rte_memzone_reserve_aligned(z_name, size, socket_id,
4916                         RTE_MEMZONE_IOVA_CONTIG, align);
4917 }
4918
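/*
 * Driver-side usage sketch (illustrative): a PMD reserving IOVA-contiguous
 * memory for a descriptor ring during queue setup.  The ring name, size
 * and alignment below are made-up example values.
 *
 *	const struct rte_memzone *mz;
 *
 *	mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
 *				      ring_size, RTE_CACHE_LINE_SIZE,
 *				      socket_id);
 *	if (mz == NULL)
 *		return -rte_errno;
 *	// ... program mz->iova / mz->addr into the hardware ...
 *	rte_eth_dma_zone_free(dev, "rx_ring", queue_idx);
 */
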
4919 int
4920 rte_eth_dma_zone_free(const struct rte_eth_dev *dev, const char *ring_name,
4921                 uint16_t queue_id)
4922 {
4923         char z_name[RTE_MEMZONE_NAMESIZE];
4924         const struct rte_memzone *mz;
4925         int rc = 0;
4926
4927         rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
4928                         queue_id, ring_name);
4929         if (rc >= RTE_MEMZONE_NAMESIZE) {
4930                 RTE_ETHDEV_LOG(ERR, "ring name too long\n");
4931                 return -ENAMETOOLONG;
4932         }
4933
4934         mz = rte_memzone_lookup(z_name);
4935         if (mz)
4936                 rc = rte_memzone_free(mz);
4937         else
4938                 rc = -ENOENT;
4939
4940         return rc;
4941 }
4942
4943 int
4944 rte_eth_dev_create(struct rte_device *device, const char *name,
4945         size_t priv_data_size,
4946         ethdev_bus_specific_init ethdev_bus_specific_init,
4947         void *bus_init_params,
4948         ethdev_init_t ethdev_init, void *init_params)
4949 {
4950         struct rte_eth_dev *ethdev;
4951         int retval;
4952
4953         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL);
4954
4955         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
4956                 ethdev = rte_eth_dev_allocate(name);
4957                 if (!ethdev)
4958                         return -ENODEV;
4959
4960                 if (priv_data_size) {
4961                         ethdev->data->dev_private = rte_zmalloc_socket(
4962                                 name, priv_data_size, RTE_CACHE_LINE_SIZE,
4963                                 device->numa_node);
4964
4965                         if (!ethdev->data->dev_private) {
4966                                 RTE_ETHDEV_LOG(ERR,
4967                                         "failed to allocate private data\n");
4968                                 retval = -ENOMEM;
4969                                 goto probe_failed;
4970                         }
4971                 }
4972         } else {
4973                 ethdev = rte_eth_dev_attach_secondary(name);
4974                 if (!ethdev) {
4975                         RTE_ETHDEV_LOG(ERR,
4976                                 "secondary process attach failed, ethdev doesn't exist\n");
4977                         return -ENODEV;
4978                 }
4979         }
4980
4981         ethdev->device = device;
4982
4983         if (ethdev_bus_specific_init) {
4984                 retval = ethdev_bus_specific_init(ethdev, bus_init_params);
4985                 if (retval) {
4986                         RTE_ETHDEV_LOG(ERR,
4987                                 "ethdev bus specific initialisation failed\n");
4988                         goto probe_failed;
4989                 }
4990         }
4991
4992         retval = ethdev_init(ethdev, init_params);
4993         if (retval) {
4994                 RTE_ETHDEV_LOG(ERR, "ethdev initialisation failed\n");
4995                 goto probe_failed;
4996         }
4997
4998         rte_eth_dev_probing_finish(ethdev);
4999
5000         return retval;
5001
5002 probe_failed:
5003         rte_eth_dev_release_port(ethdev);
5004         return retval;
5005 }
5006
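/*
 * Driver-side usage sketch (illustrative): a bus probe routine creating
 * an ethdev with private data.  my_ethdev_init and struct my_priv are
 * made-up names; the init callback must match ethdev_init_t.
 *
 *	static int
 *	my_ethdev_init(struct rte_eth_dev *ethdev, void *init_params)
 *	{
 *		RTE_SET_USED(init_params);
 *		// ... fill ethdev->dev_ops, MAC addresses, etc. ...
 *		return 0;
 *	}
 *
 *	ret = rte_eth_dev_create(&pci_dev->device, name,
 *				 sizeof(struct my_priv),
 *				 NULL, NULL,	// no bus-specific init
 *				 my_ethdev_init, NULL);
 */
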
5007 int
5008 rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
5009         ethdev_uninit_t ethdev_uninit)
5010 {
5011         int ret;
5012
5013         ethdev = rte_eth_dev_allocated(ethdev->data->name);
5014         if (!ethdev)
5015                 return -ENODEV;
5016
5017         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL);
5018
5019         ret = ethdev_uninit(ethdev);
5020         if (ret)
5021                 return ret;
5022
5023         return rte_eth_dev_release_port(ethdev);
5024 }
5025
5026 int
5027 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
5028                           int epfd, int op, void *data)
5029 {
5030         uint32_t vec;
5031         struct rte_eth_dev *dev;
5032         struct rte_intr_handle *intr_handle;
5033         int rc;
5034
5035         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5036         dev = &rte_eth_devices[port_id];
5037
5038         if (queue_id >= dev->data->nb_rx_queues) {
5039                 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
5040                 return -EINVAL;
5041         }
5042
5043         if (!dev->intr_handle) {
5044                 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n");
5045                 return -ENOTSUP;
5046         }
5047
5048         intr_handle = dev->intr_handle;
5049         if (!intr_handle->intr_vec) {
5050                 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n");
5051                 return -EPERM;
5052         }
5053
5054         vec = intr_handle->intr_vec[queue_id];
5055         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
5056         if (rc && rc != -EEXIST) {
5057                 RTE_ETHDEV_LOG(ERR,
5058                         "port %u queue %u: Rx ctl error op %d epfd %d vec %u\n",
5059                         port_id, queue_id, op, epfd, vec);
5060                 return rc;
5061         }
5062
5063         return 0;
5064 }
5065
5066 int
5067 rte_eth_dev_rx_intr_enable(uint16_t port_id,
5068                            uint16_t queue_id)
5069 {
5070         struct rte_eth_dev *dev;
5071         int ret;
5072
5073         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5074         dev = &rte_eth_devices[port_id];
5075
5076         ret = eth_dev_validate_rx_queue(dev, queue_id);
5077         if (ret != 0)
5078                 return ret;
5079
5080         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
5081         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id));
5082 }
5083
5084 int
5085 rte_eth_dev_rx_intr_disable(uint16_t port_id,
5086                             uint16_t queue_id)
5087 {
5088         struct rte_eth_dev *dev;
5089         int ret;
5090
5091         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5092         dev = &rte_eth_devices[port_id];
5093
5094         ret = eth_dev_validate_rx_queue(dev, queue_id);
5095         if (ret != 0)
5096                 return ret;
5097
5098         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
5099         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id));
5100 }
5101
5102
5103 const struct rte_eth_rxtx_callback *
5104 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
5105                 rte_rx_callback_fn fn, void *user_param)
5106 {
5107 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5108         rte_errno = ENOTSUP;
5109         return NULL;
5110 #endif
5111         struct rte_eth_dev *dev;
5112
5113         /* check input parameters */
5114         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
5115                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
5116                 rte_errno = EINVAL;
5117                 return NULL;
5118         }
5119         dev = &rte_eth_devices[port_id];
5120         if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
5121                 rte_errno = EINVAL;
5122                 return NULL;
5123         }
5124         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
5125
5126         if (cb == NULL) {
5127                 rte_errno = ENOMEM;
5128                 return NULL;
5129         }
5130
5131         cb->fn.rx = fn;
5132         cb->param = user_param;
5133
5134         rte_spinlock_lock(&eth_dev_rx_cb_lock);
5135         /* Add the callbacks in FIFO order. */
5136         struct rte_eth_rxtx_callback *tail =
5137                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
5138
5139         if (!tail) {
5140                 /* Stores to cb->fn and cb->param should complete before
5141                  * cb is visible to data plane.
5142                  */
5143                 __atomic_store_n(
5144                         &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
5145                         cb, __ATOMIC_RELEASE);
5146
5147         } else {
5148                 while (tail->next)
5149                         tail = tail->next;
5150                 /* Stores to cb->fn and cb->param should complete before
5151                  * cb is visible to data plane.
5152                  */
5153                 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
5154         }
5155         rte_spinlock_unlock(&eth_dev_rx_cb_lock);
5156
5157         return cb;
5158 }
5159
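/*
 * Usage sketch (illustrative, not part of the library): a post-Rx-burst
 * callback counting received packets on queue 0.  The signature follows
 * rte_rx_callback_fn; names are made-up examples.
 *
 *	static uint16_t
 *	count_rx_cb(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[],
 *		    uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
 *	{
 *		uint64_t *counter = user_param;
 *
 *		RTE_SET_USED(port_id);
 *		RTE_SET_USED(queue);
 *		RTE_SET_USED(pkts);
 *		RTE_SET_USED(max_pkts);
 *		*counter += nb_pkts;
 *		return nb_pkts;
 *	}
 *
 *	static uint64_t rx_count;
 *	const struct rte_eth_rxtx_callback *cb =
 *		rte_eth_add_rx_callback(port_id, 0, count_rx_cb, &rx_count);
 */
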
5160 const struct rte_eth_rxtx_callback *
5161 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
5162                 rte_rx_callback_fn fn, void *user_param)
5163 {
5164 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5165         rte_errno = ENOTSUP;
5166         return NULL;
5167 #endif
5168         /* check input parameters */
5169         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
5170                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
5171                 rte_errno = EINVAL;
5172                 return NULL;
5173         }
5174
5175         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
5176
5177         if (cb == NULL) {
5178                 rte_errno = ENOMEM;
5179                 return NULL;
5180         }
5181
5182         cb->fn.rx = fn;
5183         cb->param = user_param;
5184
5185         rte_spinlock_lock(&eth_dev_rx_cb_lock);
5186         /* Add the callbacks at first position */
5187         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
5188         /* Stores to cb->fn, cb->param and cb->next should complete before
5189          * cb is visible to data plane threads.
5190          */
5191         __atomic_store_n(
5192                 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
5193                 cb, __ATOMIC_RELEASE);
5194         rte_spinlock_unlock(&eth_dev_rx_cb_lock);
5195
5196         return cb;
5197 }
5198
5199 const struct rte_eth_rxtx_callback *
5200 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
5201                 rte_tx_callback_fn fn, void *user_param)
5202 {
5203 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5204         rte_errno = ENOTSUP;
5205         return NULL;
5206 #endif
5207         struct rte_eth_dev *dev;
5208
5209         /* check input parameters */
5210         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
5211                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
5212                 rte_errno = EINVAL;
5213                 return NULL;
5214         }
5215
5216         dev = &rte_eth_devices[port_id];
5217         if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
5218                 rte_errno = EINVAL;
5219                 return NULL;
5220         }
5221
5222         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
5223
5224         if (cb == NULL) {
5225                 rte_errno = ENOMEM;
5226                 return NULL;
5227         }
5228
5229         cb->fn.tx = fn;
5230         cb->param = user_param;
5231
5232         rte_spinlock_lock(&eth_dev_tx_cb_lock);
5233         /* Add the callbacks in FIFO order. */
5234         struct rte_eth_rxtx_callback *tail =
5235                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
5236
5237         if (!tail) {
5238                 /* Stores to cb->fn and cb->param should complete before
5239                  * cb is visible to data plane.
5240                  */
5241                 __atomic_store_n(
5242                         &rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id],
5243                         cb, __ATOMIC_RELEASE);
5244
5245         } else {
5246                 while (tail->next)
5247                         tail = tail->next;
5248                 /* Stores to cb->fn and cb->param should complete before
5249                  * cb is visible to data plane.
5250                  */
5251                 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
5252         }
5253         rte_spinlock_unlock(&eth_dev_tx_cb_lock);
5254
5255         return cb;
5256 }
5257
5258 int
5259 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
5260                 const struct rte_eth_rxtx_callback *user_cb)
5261 {
5262 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5263         return -ENOTSUP;
5264 #endif
5265         /* Check input parameters. */
5266         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5267         if (user_cb == NULL ||
5268                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
5269                 return -EINVAL;
5270
5271         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
5272         struct rte_eth_rxtx_callback *cb;
5273         struct rte_eth_rxtx_callback **prev_cb;
5274         int ret = -EINVAL;
5275
5276         rte_spinlock_lock(&eth_dev_rx_cb_lock);
5277         prev_cb = &dev->post_rx_burst_cbs[queue_id];
5278         for (; *prev_cb != NULL; prev_cb = &cb->next) {
5279                 cb = *prev_cb;
5280                 if (cb == user_cb) {
5281                         /* Remove the user cb from the callback list. */
5282                         __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
5283                         ret = 0;
5284                         break;
5285                 }
5286         }
5287         rte_spinlock_unlock(&eth_dev_rx_cb_lock);
5288
5289         return ret;
5290 }
5291
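/*
 * Lifetime note (sketch, based on the code above): removal only unlinks
 * the callback; the structure is not freed here because a data-plane
 * thread may still be dereferencing it.  The application is expected to
 * wait until no Rx burst can still be using the callback, then release
 * it itself, e.g.:
 *
 *	rte_eth_remove_rx_callback(port_id, 0, cb);
 *	// ... wait for data-plane threads to quiesce ...
 *	rte_free((void *)(uintptr_t)cb);
 */
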
5292 int
5293 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
5294                 const struct rte_eth_rxtx_callback *user_cb)
5295 {
5296 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5297         return -ENOTSUP;
5298 #endif
5299         /* Check input parameters. */
5300         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5301         if (user_cb == NULL ||
5302                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
5303                 return -EINVAL;
5304
5305         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
5306         int ret = -EINVAL;
5307         struct rte_eth_rxtx_callback *cb;
5308         struct rte_eth_rxtx_callback **prev_cb;
5309
5310         rte_spinlock_lock(&eth_dev_tx_cb_lock);
5311         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
5312         for (; *prev_cb != NULL; prev_cb = &cb->next) {
5313                 cb = *prev_cb;
5314                 if (cb == user_cb) {
5315                         /* Remove the user cb from the callback list. */
5316                         __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
5317                         ret = 0;
5318                         break;
5319                 }
5320         }
5321         rte_spinlock_unlock(&eth_dev_tx_cb_lock);
5322
5323         return ret;
5324 }
5325
5326 int
5327 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5328         struct rte_eth_rxq_info *qinfo)
5329 {
5330         struct rte_eth_dev *dev;
5331
5332         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5333         dev = &rte_eth_devices[port_id];
5334
5335         if (queue_id >= dev->data->nb_rx_queues) {
5336                 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
5337                 return -EINVAL;
5338         }
5339
5340         if (qinfo == NULL) {
5341                 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Rx queue %u info to NULL\n",
5342                         port_id, queue_id);
5343                 return -EINVAL;
5344         }
5345
5346         if (dev->data->rx_queues == NULL ||
5347                         dev->data->rx_queues[queue_id] == NULL) {
5348                 RTE_ETHDEV_LOG(ERR,
5349                                "Rx queue %"PRIu16" of device with port_id=%"
5350                                PRIu16" has not been setup\n",
5351                                queue_id, port_id);
5352                 return -EINVAL;
5353         }
5354
5355         if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
5356                 RTE_ETHDEV_LOG(INFO,
5357                         "Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
5358                         queue_id, port_id);
5359                 return -EINVAL;
5360         }
5361
5362         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
5363
5364         memset(qinfo, 0, sizeof(*qinfo));
5365         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
5366         qinfo->queue_state = dev->data->rx_queue_state[queue_id];
5367
5368         return 0;
5369 }
5370
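/*
 * Usage sketch (illustrative, not part of the library): query Rx queue 0
 * of an example port and report its descriptor ring size and state.
 *
 *	struct rte_eth_rxq_info qinfo;
 *
 *	if (rte_eth_rx_queue_info_get(port_id, 0, &qinfo) == 0)
 *		printf("port %u rxq 0: %u descriptors, state %u\n",
 *		       port_id, qinfo.nb_desc, qinfo.queue_state);
 */
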
5371 int
5372 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5373         struct rte_eth_txq_info *qinfo)
5374 {
5375         struct rte_eth_dev *dev;
5376
5377         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5378         dev = &rte_eth_devices[port_id];
5379
5380         if (queue_id >= dev->data->nb_tx_queues) {
5381                 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id);
5382                 return -EINVAL;
5383         }
5384
5385         if (qinfo == NULL) {
5386                 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Tx queue %u info to NULL\n",
5387                         port_id, queue_id);
5388                 return -EINVAL;
5389         }
5390
5391         if (dev->data->tx_queues == NULL ||
5392                         dev->data->tx_queues[queue_id] == NULL) {
5393                 RTE_ETHDEV_LOG(ERR,
5394                                "Tx queue %"PRIu16" of device with port_id=%"
5395                                PRIu16" has not been setup\n",
5396                                queue_id, port_id);
5397                 return -EINVAL;
5398         }
5399
5400         if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
5401                 RTE_ETHDEV_LOG(INFO,
5402                         "Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
5403                         queue_id, port_id);
5404                 return -EINVAL;
5405         }
5406
5407         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
5408
5409         memset(qinfo, 0, sizeof(*qinfo));
5410         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
5411         qinfo->queue_state = dev->data->tx_queue_state[queue_id];
5412
5413         return 0;
5414 }
5415
5416 int
5417 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5418                           struct rte_eth_burst_mode *mode)
5419 {
5420         struct rte_eth_dev *dev;
5421
5422         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5423         dev = &rte_eth_devices[port_id];
5424
5425         if (queue_id >= dev->data->nb_rx_queues) {
5426                 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
5427                 return -EINVAL;
5428         }
5429
5430         if (mode == NULL) {
5431                 RTE_ETHDEV_LOG(ERR,
5432                         "Cannot get ethdev port %u Rx queue %u burst mode to NULL\n",
5433                         port_id, queue_id);
5434                 return -EINVAL;
5435         }
5436
5437         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP);
5438         memset(mode, 0, sizeof(*mode));
5439         return eth_err(port_id,
5440                        dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode));
5441 }
5442
5443 int
5444 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5445                           struct rte_eth_burst_mode *mode)
5446 {
5447         struct rte_eth_dev *dev;
5448
5449         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5450         dev = &rte_eth_devices[port_id];
5451
5452         if (queue_id >= dev->data->nb_tx_queues) {
5453                 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id);
5454                 return -EINVAL;
5455         }
5456
5457         if (mode == NULL) {
5458                 RTE_ETHDEV_LOG(ERR,
5459                         "Cannot get ethdev port %u Tx queue %u burst mode to NULL\n",
5460                         port_id, queue_id);
5461                 return -EINVAL;
5462         }
5463
5464         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP);
5465         memset(mode, 0, sizeof(*mode));
5466         return eth_err(port_id,
5467                        dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode));
5468 }
5469
5470 int
5471 rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id,
5472                 struct rte_power_monitor_cond *pmc)
5473 {
5474         struct rte_eth_dev *dev;
5475
5476         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5477         dev = &rte_eth_devices[port_id];
5478
5479         if (queue_id >= dev->data->nb_rx_queues) {
5480                 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
5481                 return -EINVAL;
5482         }
5483
5484         if (pmc == NULL) {
5485                 RTE_ETHDEV_LOG(ERR,
5486                         "Cannot get ethdev port %u Rx queue %u power monitor condition to NULL\n",
5487                         port_id, queue_id);
5488                 return -EINVAL;
5489         }
5490
5491         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_monitor_addr, -ENOTSUP);
5492         return eth_err(port_id,
5493                 dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id], pmc));
5494 }
5495
5496 int
5497 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
5498                              struct rte_ether_addr *mc_addr_set,
5499                              uint32_t nb_mc_addr)
5500 {
5501         struct rte_eth_dev *dev;
5502
5503         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5504         dev = &rte_eth_devices[port_id];
5505
5506         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
5507         return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
5508                                                 mc_addr_set, nb_mc_addr));
5509 }
5510
5511 int
5512 rte_eth_timesync_enable(uint16_t port_id)
5513 {
5514         struct rte_eth_dev *dev;
5515
5516         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5517         dev = &rte_eth_devices[port_id];
5518
5519         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
5520         return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
5521 }
5522
5523 int
5524 rte_eth_timesync_disable(uint16_t port_id)
5525 {
5526         struct rte_eth_dev *dev;
5527
5528         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5529         dev = &rte_eth_devices[port_id];
5530
5531         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
5532         return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
5533 }
5534
5535 int
5536 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
5537                                    uint32_t flags)
5538 {
5539         struct rte_eth_dev *dev;
5540
5541         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5542         dev = &rte_eth_devices[port_id];
5543
5544         if (timestamp == NULL) {
5545                 RTE_ETHDEV_LOG(ERR,
5546                         "Cannot read ethdev port %u Rx timestamp to NULL\n",
5547                         port_id);
5548                 return -EINVAL;
5549         }
5550
5551         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
5552         return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
5553                                 (dev, timestamp, flags));
5554 }
5555
5556 int
5557 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
5558                                    struct timespec *timestamp)
5559 {
5560         struct rte_eth_dev *dev;
5561
5562         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5563         dev = &rte_eth_devices[port_id];
5564
5565         if (timestamp == NULL) {
5566                 RTE_ETHDEV_LOG(ERR,
5567                         "Cannot read ethdev port %u Tx timestamp to NULL\n",
5568                         port_id);
5569                 return -EINVAL;
5570         }
5571
5572         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
5573         return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
5574                                 (dev, timestamp));
5575 }
5576
5577 int
5578 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
5579 {
5580         struct rte_eth_dev *dev;
5581
5582         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5583         dev = &rte_eth_devices[port_id];
5584
5585         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
5586         return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, delta));
5587 }
5588
5589 int
5590 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
5591 {
5592         struct rte_eth_dev *dev;
5593
5594         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5595         dev = &rte_eth_devices[port_id];
5596
5597         if (timestamp == NULL) {
5598                 RTE_ETHDEV_LOG(ERR,
5599                         "Cannot read ethdev port %u timesync time to NULL\n",
5600                         port_id);
5601                 return -EINVAL;
5602         }
5603
5604         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
5605         return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
5606                                                                 timestamp));
5607 }
5608
5609 int
5610 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
5611 {
5612         struct rte_eth_dev *dev;
5613
5614         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5615         dev = &rte_eth_devices[port_id];
5616
5617         if (timestamp == NULL) {
5618                 RTE_ETHDEV_LOG(ERR,
5619                         "Cannot write ethdev port %u timesync from NULL time\n",
5620                         port_id);
5621                 return -EINVAL;
5622         }
5623
5624         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
5625         return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
5626                                                                 timestamp));
5627 }
5628
5629 int
5630 rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
5631 {
5632         struct rte_eth_dev *dev;
5633
5634         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5635         dev = &rte_eth_devices[port_id];
5636
5637         if (clock == NULL) {
5638                 RTE_ETHDEV_LOG(ERR, "Cannot read ethdev port %u clock to NULL\n",
5639                         port_id);
5640                 return -EINVAL;
5641         }
5642
5643         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP);
5644         return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock));
5645 }
5646
5647 int
5648 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
5649 {
5650         struct rte_eth_dev *dev;
5651
5652         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5653         dev = &rte_eth_devices[port_id];
5654
5655         if (info == NULL) {
5656                 RTE_ETHDEV_LOG(ERR,
5657                         "Cannot get ethdev port %u register info to NULL\n",
5658                         port_id);
5659                 return -EINVAL;
5660         }
5661
5662         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
5663         return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
5664 }
5665
5666 int
5667 rte_eth_dev_get_eeprom_length(uint16_t port_id)
5668 {
5669         struct rte_eth_dev *dev;
5670
5671         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5672         dev = &rte_eth_devices[port_id];
5673
5674         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
5675         return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
5676 }
5677
5678 int
5679 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
5680 {
5681         struct rte_eth_dev *dev;
5682
5683         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5684         dev = &rte_eth_devices[port_id];
5685
5686         if (info == NULL) {
5687                 RTE_ETHDEV_LOG(ERR,
5688                         "Cannot get ethdev port %u EEPROM info to NULL\n",
5689                         port_id);
5690                 return -EINVAL;
5691         }
5692
5693         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
5694         return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
5695 }
5696
5697 int
5698 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
5699 {
5700         struct rte_eth_dev *dev;
5701
5702         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5703         dev = &rte_eth_devices[port_id];
5704
5705         if (info == NULL) {
5706                 RTE_ETHDEV_LOG(ERR,
5707                         "Cannot set ethdev port %u EEPROM from NULL info\n",
5708                         port_id);
5709                 return -EINVAL;
5710         }
5711
5712         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
5713         return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
5714 }
5715
5716 int
5717 rte_eth_dev_get_module_info(uint16_t port_id,
5718                             struct rte_eth_dev_module_info *modinfo)
5719 {
5720         struct rte_eth_dev *dev;
5721
5722         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5723         dev = &rte_eth_devices[port_id];
5724
5725         if (modinfo == NULL) {
5726                 RTE_ETHDEV_LOG(ERR,
5727                         "Cannot get ethdev port %u EEPROM module info to NULL\n",
5728                         port_id);
5729                 return -EINVAL;
5730         }
5731
5732         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP);
5733         return (*dev->dev_ops->get_module_info)(dev, modinfo);
5734 }
5735
5736 int
5737 rte_eth_dev_get_module_eeprom(uint16_t port_id,
5738                               struct rte_dev_eeprom_info *info)
5739 {
5740         struct rte_eth_dev *dev;
5741
5742         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5743         dev = &rte_eth_devices[port_id];
5744
5745         if (info == NULL) {
5746                 RTE_ETHDEV_LOG(ERR,
5747                         "Cannot get ethdev port %u module EEPROM info to NULL\n",
5748                         port_id);
5749                 return -EINVAL;
5750         }
5751
5752         if (info->data == NULL) {
5753                 RTE_ETHDEV_LOG(ERR,
5754                         "Cannot get ethdev port %u module EEPROM data to NULL\n",
5755                         port_id);
5756                 return -EINVAL;
5757         }
5758
5759         if (info->length == 0) {
5760                 RTE_ETHDEV_LOG(ERR,
5761                         "Cannot get ethdev port %u module EEPROM to data with zero size\n",
5762                         port_id);
5763                 return -EINVAL;
5764         }
5765
5766         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP);
5767         return (*dev->dev_ops->get_module_eeprom)(dev, info);
5768 }
5769
5770 int
5771 rte_eth_dev_get_dcb_info(uint16_t port_id,
5772                              struct rte_eth_dcb_info *dcb_info)
5773 {
5774         struct rte_eth_dev *dev;
5775
5776         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5777         dev = &rte_eth_devices[port_id];
5778
5779         if (dcb_info == NULL) {
5780                 RTE_ETHDEV_LOG(ERR,
5781                         "Cannot get ethdev port %u DCB info to NULL\n",
5782                         port_id);
5783                 return -EINVAL;
5784         }
5785
5786         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
5787
5788         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
5789         return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
5790 }
5791
5792 static void
5793 eth_dev_adjust_nb_desc(uint16_t *nb_desc,
5794                 const struct rte_eth_desc_lim *desc_lim)
5795 {
5796         if (desc_lim->nb_align != 0)
5797                 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
5798
5799         if (desc_lim->nb_max != 0)
5800                 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
5801
5802         *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
5803 }
5804
5805 int
5806 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
5807                                  uint16_t *nb_rx_desc,
5808                                  uint16_t *nb_tx_desc)
5809 {
5810         struct rte_eth_dev_info dev_info;
5811         int ret;
5812
5813         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5814
5815         ret = rte_eth_dev_info_get(port_id, &dev_info);
5816         if (ret != 0)
5817                 return ret;
5818
5819         if (nb_rx_desc != NULL)
5820                 eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
5821
5822         if (nb_tx_desc != NULL)
5823                 eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
5824
5825         return 0;
5826 }
5827
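/*
 * Usage sketch (illustrative, not part of the library): let the driver
 * round the requested ring sizes to something it supports before queue
 * setup.  With nb_align == 32, nb_min == 64 and nb_max == 4096 (made-up
 * limits), a request of 1000 descriptors comes back as 1024.  socket_id
 * and mb_pool are assumed to exist.
 *
 *	uint16_t nb_rxd = 1000, nb_txd = 1000;
 *
 *	if (rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd) == 0)
 *		rte_eth_rx_queue_setup(port_id, 0, nb_rxd, socket_id,
 *				       NULL, mb_pool);
 */
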
5828 int
5829 rte_eth_dev_hairpin_capability_get(uint16_t port_id,
5830                                    struct rte_eth_hairpin_cap *cap)
5831 {
5832         struct rte_eth_dev *dev;
5833
5834         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5835         dev = &rte_eth_devices[port_id];
5836
5837         if (cap == NULL) {
5838                 RTE_ETHDEV_LOG(ERR,
5839                         "Cannot get ethdev port %u hairpin capability to NULL\n",
5840                         port_id);
5841                 return -EINVAL;
5842         }
5843
5844         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP);
5845         memset(cap, 0, sizeof(*cap));
5846         return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap));
5847 }
5848
5849 int
5850 rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
5851 {
5852         if (dev->data->rx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN)
5853                 return 1;
5854         return 0;
5855 }
5856
5857 int
5858 rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
5859 {
5860         if (dev->data->tx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN)
5861                 return 1;
5862         return 0;
5863 }
5864
5865 int
5866 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
5867 {
5868         struct rte_eth_dev *dev;
5869
5870         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5871         dev = &rte_eth_devices[port_id];
5872
5873         if (pool == NULL) {
5874                 RTE_ETHDEV_LOG(ERR,
5875                         "Cannot test ethdev port %u mempool operation from NULL pool\n",
5876                         port_id);
5877                 return -EINVAL;
5878         }
5879
5880         if (*dev->dev_ops->pool_ops_supported == NULL)
5881                 return 1; /* all pools are supported */
5882
5883         return (*dev->dev_ops->pool_ops_supported)(dev, pool);
5884 }
5885
5886 /**
5887  * A set of values to describe the possible states of a switch domain.
5888  */
5889 enum rte_eth_switch_domain_state {
5890         RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
5891         RTE_ETH_SWITCH_DOMAIN_ALLOCATED
5892 };
5893
5894 /**
5895  * Array of switch domains available for allocation. Array is sized to
5896  * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
5897  * ethdev ports in a single process.
5898  */
5899 static struct rte_eth_dev_switch {
5900         enum rte_eth_switch_domain_state state;
5901 } eth_dev_switch_domains[RTE_MAX_ETHPORTS];
5902
5903 int
5904 rte_eth_switch_domain_alloc(uint16_t *domain_id)
5905 {
5906         uint16_t i;
5907
5908         *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
5909
5910         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
5911                 if (eth_dev_switch_domains[i].state ==
5912                         RTE_ETH_SWITCH_DOMAIN_UNUSED) {
5913                         eth_dev_switch_domains[i].state =
5914                                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
5915                         *domain_id = i;
5916                         return 0;
5917                 }
5918         }
5919
5920         return -ENOSPC;
5921 }
5922
5923 int
5924 rte_eth_switch_domain_free(uint16_t domain_id)
5925 {
5926         if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
5927                 domain_id >= RTE_MAX_ETHPORTS)
5928                 return -EINVAL;
5929
5930         if (eth_dev_switch_domains[domain_id].state !=
5931                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
5932                 return -EINVAL;
5933
5934         eth_dev_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;
5935
5936         return 0;
5937 }

static int
eth_dev_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
{
        int state;
        struct rte_kvargs_pair *pair;
        char *letter;

        arglist->str = strdup(str_in);
        if (arglist->str == NULL)
                return -ENOMEM;

        letter = arglist->str;
        state = 0;
        arglist->count = 0;
        pair = &arglist->pairs[0];
        while (1) {
                switch (state) {
                case 0: /* Initial */
                        if (*letter == '=')
                                return -EINVAL;
                        else if (*letter == '\0')
                                return 0;

                        state = 1;
                        pair->key = letter;
                        /* fall-thru */

                case 1: /* Parsing key */
                        if (*letter == '=') {
                                *letter = '\0';
                                pair->value = letter + 1;
                                state = 2;
                        } else if (*letter == ',' || *letter == '\0')
                                return -EINVAL;
                        break;

                case 2: /* Parsing value */
                        if (*letter == '[')
                                state = 3;
                        else if (*letter == ',') {
                                *letter = '\0';
                                arglist->count++;
                                pair = &arglist->pairs[arglist->count];
                                state = 0;
                        } else if (*letter == '\0') {
                                /*
                                 * Step back so the letter++ below lands on
                                 * the terminator again and state 0 returns
                                 * success on the next iteration.
                                 */
                                letter--;
                                arglist->count++;
                                pair = &arglist->pairs[arglist->count];
                                state = 0;
                        }
                        break;

                case 3: /* Parsing list */
                        if (*letter == ']')
                                state = 2;
                        else if (*letter == '\0')
                                return -EINVAL;
                        break;
                }
                letter++;
        }
}
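
/*
 * Illustration (not in the original source): tokenising the devargs string
 * "representor=[0,2-4],foo=bar" yields two key/value pairs,
 * { "representor", "[0,2-4]" } and { "foo", "bar" }. State 3 ('[' ... ']')
 * protects the commas inside the list from acting as pair separators.
 */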

int
rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
{
        struct rte_kvargs args;
        struct rte_kvargs_pair *pair;
        unsigned int i;
        int result = 0;

        memset(eth_da, 0, sizeof(*eth_da));

        result = eth_dev_devargs_tokenise(&args, dargs);
        if (result < 0)
                goto parse_cleanup;

        for (i = 0; i < args.count; i++) {
                pair = &args.pairs[i];
                if (strcmp("representor", pair->key) == 0) {
                        if (eth_da->type != RTE_ETH_REPRESENTOR_NONE) {
                                RTE_ETHDEV_LOG(ERR,
                                        "duplicated representor key: %s\n",
                                        dargs);
                                result = -1;
                                goto parse_cleanup;
                        }
                        result = rte_eth_devargs_parse_representor_ports(
                                        pair->value, eth_da);
                        if (result < 0)
                                goto parse_cleanup;
                }
        }

parse_cleanup:
        if (args.str)
                free(args.str);

        return result;
}
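
/*
 * Example (illustrative): parsing the representor part of a devargs string.
 * The input string is made up; field names follow struct rte_eth_devargs.
 */
static int
example_parse_representor(void)
{
        struct rte_eth_devargs da;
        int ret;

        ret = rte_eth_devargs_parse("representor=[0-3]", &da);
        if (ret < 0)
                return ret;
        /* typically: da.type == RTE_ETH_REPRESENTOR_VF, da.nb_representor_ports == 4 */
        return 0;
}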

int
rte_eth_representor_id_get(uint16_t port_id,
                           enum rte_eth_representor_type type,
                           int controller, int pf, int representor_port,
                           uint16_t *repr_id)
{
        int ret, n, count;
        uint32_t i;
        struct rte_eth_representor_info *info = NULL;
        size_t size;

        if (type == RTE_ETH_REPRESENTOR_NONE)
                return 0;
        if (repr_id == NULL)
                return -EINVAL;

        /* Get PMD representor range info. */
        ret = rte_eth_representor_info_get(port_id, NULL);
        if (ret == -ENOTSUP && type == RTE_ETH_REPRESENTOR_VF &&
            controller == -1 && pf == -1) {
                /* Direct mapping for legacy VF representor. */
                *repr_id = representor_port;
                return 0;
        } else if (ret < 0) {
                return ret;
        }
        n = ret;
        size = sizeof(*info) + n * sizeof(info->ranges[0]);
        info = calloc(1, size);
        if (info == NULL)
                return -ENOMEM;
        info->nb_ranges_alloc = n;
        ret = rte_eth_representor_info_get(port_id, info);
        if (ret < 0)
                goto out;

        /* Default controller and pf to caller. */
        if (controller == -1)
                controller = info->controller;
        if (pf == -1)
                pf = info->pf;

        /* Locate representor ID. */
        ret = -ENOENT;
        for (i = 0; i < info->nb_ranges; ++i) {
                if (info->ranges[i].type != type)
                        continue;
                if (info->ranges[i].controller != controller)
                        continue;
                if (info->ranges[i].id_end < info->ranges[i].id_base) {
                        RTE_ETHDEV_LOG(WARNING,
                                "Port %hu invalid representor ID range %u - %u, entry %d\n",
                                port_id, info->ranges[i].id_base,
                                info->ranges[i].id_end, i);
                        continue;
                }
                count = info->ranges[i].id_end - info->ranges[i].id_base + 1;
                switch (info->ranges[i].type) {
                case RTE_ETH_REPRESENTOR_PF:
                        if (pf < info->ranges[i].pf ||
                            pf >= info->ranges[i].pf + count)
                                continue;
                        *repr_id = info->ranges[i].id_base +
                                   (pf - info->ranges[i].pf);
                        ret = 0;
                        goto out;
                case RTE_ETH_REPRESENTOR_VF:
                        if (info->ranges[i].pf != pf)
                                continue;
                        if (representor_port < info->ranges[i].vf ||
                            representor_port >= info->ranges[i].vf + count)
                                continue;
                        *repr_id = info->ranges[i].id_base +
                                   (representor_port - info->ranges[i].vf);
                        ret = 0;
                        goto out;
                case RTE_ETH_REPRESENTOR_SF:
                        if (info->ranges[i].pf != pf)
                                continue;
                        if (representor_port < info->ranges[i].sf ||
                            representor_port >= info->ranges[i].sf + count)
                                continue;
                        *repr_id = info->ranges[i].id_base +
                                   (representor_port - info->ranges[i].sf);
                        ret = 0;
                        goto out;
                default:
                        break;
                }
        }
out:
        free(info);
        return ret;
}
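
/*
 * Example (illustrative): mapping VF 2 behind the backing device to its
 * representor ID. Passing -1 for controller and pf means "default to the
 * values reported by the backing device"; the wrapper name is hypothetical.
 */
static int
example_find_vf_representor(uint16_t port_id, uint16_t *repr_id)
{
        return rte_eth_representor_id_get(port_id, RTE_ETH_REPRESENTOR_VF,
                                          -1, -1, 2, repr_id);
}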

static int
eth_dev_handle_port_list(const char *cmd __rte_unused,
                const char *params __rte_unused,
                struct rte_tel_data *d)
{
        int port_id;

        rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
        RTE_ETH_FOREACH_DEV(port_id)
                rte_tel_data_add_array_int(d, port_id);
        return 0;
}

static void
eth_dev_add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats,
                const char *stat_name)
{
        int q;
        struct rte_tel_data *q_data = rte_tel_data_alloc();

        if (q_data == NULL) /* omit the per-queue array on alloc failure */
                return;
        rte_tel_data_start_array(q_data, RTE_TEL_U64_VAL);
        for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++)
                rte_tel_data_add_array_u64(q_data, q_stats[q]);
        rte_tel_data_add_dict_container(d, stat_name, q_data, 0);
}

#define ADD_DICT_STAT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s)

static int
eth_dev_handle_port_stats(const char *cmd __rte_unused,
                const char *params,
                struct rte_tel_data *d)
{
        struct rte_eth_stats stats;
        int port_id, ret;

        if (params == NULL || strlen(params) == 0 || !isdigit(*params))
                return -1;

        port_id = atoi(params);
        if (!rte_eth_dev_is_valid_port(port_id))
                return -1;

        ret = rte_eth_stats_get(port_id, &stats);
        if (ret < 0)
                return -1;

        rte_tel_data_start_dict(d);
        ADD_DICT_STAT(stats, ipackets);
        ADD_DICT_STAT(stats, opackets);
        ADD_DICT_STAT(stats, ibytes);
        ADD_DICT_STAT(stats, obytes);
        ADD_DICT_STAT(stats, imissed);
        ADD_DICT_STAT(stats, ierrors);
        ADD_DICT_STAT(stats, oerrors);
        ADD_DICT_STAT(stats, rx_nombuf);
        eth_dev_add_port_queue_stats(d, stats.q_ipackets, "q_ipackets");
        eth_dev_add_port_queue_stats(d, stats.q_opackets, "q_opackets");
        eth_dev_add_port_queue_stats(d, stats.q_ibytes, "q_ibytes");
        eth_dev_add_port_queue_stats(d, stats.q_obytes, "q_obytes");
        eth_dev_add_port_queue_stats(d, stats.q_errors, "q_errors");

        return 0;
}
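
/*
 * Illustration (not in the original source): a trimmed sketch of the JSON
 * this handler produces for "/ethdev/stats,0"; exact values depend on the
 * port and traffic.
 *
 *   {"/ethdev/stats": {"ipackets": 100, "opackets": 100, "ibytes": 6400,
 *    "obytes": 6400, "imissed": 0, "ierrors": 0, "oerrors": 0,
 *    "rx_nombuf": 0, "q_ipackets": [100, 0, ...], ...}}
 */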

static int
eth_dev_handle_port_xstats(const char *cmd __rte_unused,
                const char *params,
                struct rte_tel_data *d)
{
        struct rte_eth_xstat *eth_xstats;
        struct rte_eth_xstat_name *xstat_names;
        int port_id, num_xstats;
        int i, ret;
        char *end_param;

        if (params == NULL || strlen(params) == 0 || !isdigit(*params))
                return -1;

        port_id = strtoul(params, &end_param, 0);
        if (*end_param != '\0')
                RTE_ETHDEV_LOG(NOTICE,
                        "Extra parameters passed to ethdev telemetry command, ignoring\n");
        if (!rte_eth_dev_is_valid_port(port_id))
                return -1;

        num_xstats = rte_eth_xstats_get(port_id, NULL, 0);
        if (num_xstats < 0)
                return -1;

        /* use one malloc for both names and stats */
        eth_xstats = malloc((sizeof(struct rte_eth_xstat) +
                        sizeof(struct rte_eth_xstat_name)) * num_xstats);
        if (eth_xstats == NULL)
                return -1;
        xstat_names = (void *)&eth_xstats[num_xstats];

        ret = rte_eth_xstats_get_names(port_id, xstat_names, num_xstats);
        if (ret < 0 || ret > num_xstats) {
                free(eth_xstats);
                return -1;
        }

        ret = rte_eth_xstats_get(port_id, eth_xstats, num_xstats);
        if (ret < 0 || ret > num_xstats) {
                free(eth_xstats);
                return -1;
        }

        rte_tel_data_start_dict(d);
        for (i = 0; i < num_xstats; i++)
                rte_tel_data_add_dict_u64(d, xstat_names[i].name,
                                eth_xstats[i].value);
        free(eth_xstats); /* the dict holds copies; release the scratch buffer */
        return 0;
}

static int
eth_dev_handle_port_link_status(const char *cmd __rte_unused,
                const char *params,
                struct rte_tel_data *d)
{
        static const char *status_str = "status";
        int ret, port_id;
        struct rte_eth_link link;
        char *end_param;

        if (params == NULL || strlen(params) == 0 || !isdigit(*params))
                return -1;

        port_id = strtoul(params, &end_param, 0);
        if (*end_param != '\0')
                RTE_ETHDEV_LOG(NOTICE,
                        "Extra parameters passed to ethdev telemetry command, ignoring\n");
        if (!rte_eth_dev_is_valid_port(port_id))
                return -1;

        ret = rte_eth_link_get_nowait(port_id, &link);
        if (ret < 0)
                return -1;

        rte_tel_data_start_dict(d);
        if (!link.link_status) {
                rte_tel_data_add_dict_string(d, status_str, "DOWN");
                return 0;
        }
        rte_tel_data_add_dict_string(d, status_str, "UP");
        rte_tel_data_add_dict_u64(d, "speed", link.link_speed);
        rte_tel_data_add_dict_string(d, "duplex",
                        (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
                                "full-duplex" : "half-duplex");
        return 0;
}

static int
eth_dev_handle_port_info(const char *cmd __rte_unused,
                const char *params,
                struct rte_tel_data *d)
{
        struct rte_tel_data *rxq_state, *txq_state;
        char mac_addr[RTE_ETHER_ADDR_FMT_SIZE];
        struct rte_eth_dev *eth_dev;
        char *end_param;
        int port_id, i;

        if (params == NULL || strlen(params) == 0 || !isdigit(*params))
                return -1;

        port_id = strtoul(params, &end_param, 0);
        if (*end_param != '\0')
                RTE_ETHDEV_LOG(NOTICE,
                        "Extra parameters passed to ethdev telemetry command, ignoring\n");

        if (!rte_eth_dev_is_valid_port(port_id))
                return -EINVAL;

        eth_dev = &rte_eth_devices[port_id];

        rxq_state = rte_tel_data_alloc();
        if (!rxq_state)
                return -ENOMEM;

        txq_state = rte_tel_data_alloc();
        if (!txq_state) {
                rte_tel_data_free(rxq_state); /* do not leak the Rx array */
                return -ENOMEM;
        }

        rte_tel_data_start_dict(d);
        rte_tel_data_add_dict_string(d, "name", eth_dev->data->name);
        rte_tel_data_add_dict_int(d, "state", eth_dev->state);
        rte_tel_data_add_dict_int(d, "nb_rx_queues",
                        eth_dev->data->nb_rx_queues);
        rte_tel_data_add_dict_int(d, "nb_tx_queues",
                        eth_dev->data->nb_tx_queues);
        rte_tel_data_add_dict_int(d, "port_id", eth_dev->data->port_id);
        rte_tel_data_add_dict_int(d, "mtu", eth_dev->data->mtu);
        rte_tel_data_add_dict_int(d, "rx_mbuf_size_min",
                        eth_dev->data->min_rx_buf_size);
        rte_tel_data_add_dict_int(d, "rx_mbuf_alloc_fail",
                        eth_dev->data->rx_mbuf_alloc_failed);
        /* RTE_ETHER_ADDR_LEN is too small for the formatted string. */
        rte_ether_format_addr(mac_addr, sizeof(mac_addr),
                        eth_dev->data->mac_addrs);
        rte_tel_data_add_dict_string(d, "mac_addr", mac_addr);
        rte_tel_data_add_dict_int(d, "promiscuous",
                        eth_dev->data->promiscuous);
        rte_tel_data_add_dict_int(d, "scattered_rx",
                        eth_dev->data->scattered_rx);
        rte_tel_data_add_dict_int(d, "all_multicast",
                        eth_dev->data->all_multicast);
        rte_tel_data_add_dict_int(d, "dev_started", eth_dev->data->dev_started);
        rte_tel_data_add_dict_int(d, "lro", eth_dev->data->lro);
        rte_tel_data_add_dict_int(d, "dev_configured",
                        eth_dev->data->dev_configured);

        rte_tel_data_start_array(rxq_state, RTE_TEL_INT_VAL);
        for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
                rte_tel_data_add_array_int(rxq_state,
                                eth_dev->data->rx_queue_state[i]);

        rte_tel_data_start_array(txq_state, RTE_TEL_INT_VAL);
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
                rte_tel_data_add_array_int(txq_state,
                                eth_dev->data->tx_queue_state[i]);

        rte_tel_data_add_dict_container(d, "rxq_state", rxq_state, 0);
        rte_tel_data_add_dict_container(d, "txq_state", txq_state, 0);
        rte_tel_data_add_dict_int(d, "numa_node", eth_dev->data->numa_node);
        rte_tel_data_add_dict_int(d, "dev_flags", eth_dev->data->dev_flags);
        rte_tel_data_add_dict_int(d, "rx_offloads",
                        eth_dev->data->dev_conf.rxmode.offloads);
        rte_tel_data_add_dict_int(d, "tx_offloads",
                        eth_dev->data->dev_conf.txmode.offloads);
        rte_tel_data_add_dict_int(d, "ethdev_rss_hf",
                        eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf);

        return 0;
}

int
rte_eth_hairpin_queue_peer_update(uint16_t peer_port, uint16_t peer_queue,
                                  struct rte_hairpin_peer_info *cur_info,
                                  struct rte_hairpin_peer_info *peer_info,
                                  uint32_t direction)
{
        struct rte_eth_dev *dev;

        /* Current queue information (cur_info) is optional; peer info is mandatory. */
        if (peer_info == NULL)
                return -EINVAL;

        /* No need to check the validity again. */
        dev = &rte_eth_devices[peer_port];
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_update,
                                -ENOTSUP);

        return (*dev->dev_ops->hairpin_queue_peer_update)(dev, peer_queue,
                                        cur_info, peer_info, direction);
}

int
rte_eth_hairpin_queue_peer_bind(uint16_t cur_port, uint16_t cur_queue,
                                struct rte_hairpin_peer_info *peer_info,
                                uint32_t direction)
{
        struct rte_eth_dev *dev;

        if (peer_info == NULL)
                return -EINVAL;

        /* No need to check the validity again. */
        dev = &rte_eth_devices[cur_port];
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_bind,
                                -ENOTSUP);

        return (*dev->dev_ops->hairpin_queue_peer_bind)(dev, cur_queue,
                                                        peer_info, direction);
}

int
rte_eth_hairpin_queue_peer_unbind(uint16_t cur_port, uint16_t cur_queue,
                                  uint32_t direction)
{
        struct rte_eth_dev *dev;

        /* No need to check the validity again. */
        dev = &rte_eth_devices[cur_port];
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_unbind,
                                -ENOTSUP);

        return (*dev->dev_ops->hairpin_queue_peer_unbind)(dev, cur_queue,
                                                          direction);
}
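
/*
 * Example (illustrative): the three helpers above are driver-facing; an
 * application normally drives the whole per-queue sequence through the
 * public API instead. A sketch, assuming both ports were started with
 * manual-bind hairpin queues and the port numbers 0 and 1:
 */
static int
example_hairpin_bind(void)
{
        /* Bind all hairpin Tx queues of port 0 to peer Rx port 1. */
        return rte_eth_hairpin_bind(0, 1);
}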

int
rte_eth_representor_info_get(uint16_t port_id,
                             struct rte_eth_representor_info *info)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->representor_info_get, -ENOTSUP);
        return eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev, info));
}
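
/*
 * Example (illustrative): the usual two-call pattern for this API, as also
 * used by rte_eth_representor_id_get() above; call once with NULL to size
 * the buffer, then again to fill the ranges. The function name is made up.
 */
static int
example_get_representor_info(uint16_t port_id)
{
        struct rte_eth_representor_info *info;
        int n, ret;

        n = rte_eth_representor_info_get(port_id, NULL);
        if (n < 0)
                return n;
        info = calloc(1, sizeof(*info) + n * sizeof(info->ranges[0]));
        if (info == NULL)
                return -ENOMEM;
        info->nb_ranges_alloc = n;
        ret = rte_eth_representor_info_get(port_id, info);
        /* ... on success, inspect info->ranges[0 .. info->nb_ranges - 1] ... */
        free(info);
        return ret < 0 ? ret : 0;
}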

int
rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];

        if (dev->data->dev_configured != 0) {
                RTE_ETHDEV_LOG(ERR,
                        "The port (ID=%"PRIu16") is already configured\n",
                        port_id);
                return -EBUSY;
        }

        if (features == NULL) {
                RTE_ETHDEV_LOG(ERR, "Invalid features (NULL)\n");
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_metadata_negotiate, -ENOTSUP);
        return eth_err(port_id,
                       (*dev->dev_ops->rx_metadata_negotiate)(dev, features));
}
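
/*
 * Example (illustrative): negotiating Rx metadata delivery before the first
 * rte_eth_dev_configure() call, as the -EBUSY check above requires. The
 * wrapper name is hypothetical.
 */
static int
example_negotiate_rx_metadata(uint16_t port_id)
{
        uint64_t features = RTE_ETH_RX_METADATA_USER_FLAG |
                            RTE_ETH_RX_METADATA_USER_MARK;
        int ret;

        ret = rte_eth_rx_metadata_negotiate(port_id, &features);
        if (ret != 0 && ret != -ENOTSUP)
                return ret;
        /* On success, 'features' holds the subset the driver agreed to. */
        return 0;
}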

RTE_LOG_REGISTER_DEFAULT(rte_eth_dev_logtype, INFO);

RTE_INIT(ethdev_init_telemetry)
{
        rte_telemetry_register_cmd("/ethdev/list", eth_dev_handle_port_list,
                        "Returns list of available ethdev ports. Takes no parameters");
        rte_telemetry_register_cmd("/ethdev/stats", eth_dev_handle_port_stats,
                        "Returns the common stats for a port. Parameters: int port_id");
        rte_telemetry_register_cmd("/ethdev/xstats", eth_dev_handle_port_xstats,
                        "Returns the extended stats for a port. Parameters: int port_id");
        rte_telemetry_register_cmd("/ethdev/link_status",
                        eth_dev_handle_port_link_status,
                        "Returns the link status for a port. Parameters: int port_id");
        rte_telemetry_register_cmd("/ethdev/info", eth_dev_handle_port_info,
                        "Returns the device info for a port. Parameters: int port_id");
}
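
/*
 * Illustration (not in the original source): the commands registered above
 * can be exercised from the usertools/dpdk-telemetry.py client shipped with
 * DPDK; example values below are made up.
 *
 *   --> /ethdev/list
 *   {"/ethdev/list": [0, 1]}
 *   --> /ethdev/link_status,0
 *   {"/ethdev/link_status": {"status": "UP", "speed": 10000,
 *    "duplex": "full-duplex"}}
 */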