ethdev: add sanity checks in control APIs
lib/ethdev/rte_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4
5 #include <ctype.h>
6 #include <errno.h>
7 #include <inttypes.h>
8 #include <stdbool.h>
9 #include <stdint.h>
10 #include <stdlib.h>
11 #include <string.h>
12 #include <sys/queue.h>
13
14 #include <rte_byteorder.h>
15 #include <rte_log.h>
16 #include <rte_debug.h>
17 #include <rte_interrupts.h>
18 #include <rte_memory.h>
19 #include <rte_memcpy.h>
20 #include <rte_memzone.h>
21 #include <rte_launch.h>
22 #include <rte_eal.h>
23 #include <rte_per_lcore.h>
24 #include <rte_lcore.h>
25 #include <rte_branch_prediction.h>
26 #include <rte_common.h>
27 #include <rte_mempool.h>
28 #include <rte_malloc.h>
29 #include <rte_mbuf.h>
30 #include <rte_errno.h>
31 #include <rte_spinlock.h>
32 #include <rte_string_fns.h>
33 #include <rte_kvargs.h>
34 #include <rte_class.h>
35 #include <rte_ether.h>
36 #include <rte_telemetry.h>
37
38 #include "rte_ethdev_trace.h"
39 #include "rte_ethdev.h"
40 #include "ethdev_driver.h"
41 #include "ethdev_profile.h"
42 #include "ethdev_private.h"
43
44 static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
45 struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
46
47 /* spinlock for eth device callbacks */
48 static rte_spinlock_t eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
49
50 /* spinlock for add/remove rx callbacks */
51 static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;
52
53 /* spinlock for add/remove tx callbacks */
54 static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;
55
56 /* spinlock for shared data allocation */
57 static rte_spinlock_t eth_dev_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
58
59 /* store statistics names and their offsets in the stats structure */
60 struct rte_eth_xstats_name_off {
61         char name[RTE_ETH_XSTATS_NAME_SIZE];
62         unsigned int offset;
63 };
64
65 /* Shared memory between primary and secondary processes. */
66 static struct {
67         uint64_t next_owner_id;
68         rte_spinlock_t ownership_lock;
69         struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
70 } *eth_dev_shared_data;
71
72 static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = {
73         {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
74         {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
75         {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
76         {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
77         {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
78         {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
79         {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
80         {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
81                 rx_nombuf)},
82 };
83
84 #define RTE_NB_STATS RTE_DIM(eth_dev_stats_strings)
85
86 static const struct rte_eth_xstats_name_off eth_dev_rxq_stats_strings[] = {
87         {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
88         {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
89         {"errors", offsetof(struct rte_eth_stats, q_errors)},
90 };
91
92 #define RTE_NB_RXQ_STATS RTE_DIM(eth_dev_rxq_stats_strings)
93
94 static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
95         {"packets", offsetof(struct rte_eth_stats, q_opackets)},
96         {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
97 };
98 #define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)
99
100 #define RTE_RX_OFFLOAD_BIT2STR(_name)   \
101         { DEV_RX_OFFLOAD_##_name, #_name }
102
103 #define RTE_ETH_RX_OFFLOAD_BIT2STR(_name)       \
104         { RTE_ETH_RX_OFFLOAD_##_name, #_name }
105
106 static const struct {
107         uint64_t offload;
108         const char *name;
109 } eth_dev_rx_offload_names[] = {
110         RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
111         RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
112         RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
113         RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
114         RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
115         RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
116         RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
117         RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
118         RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
119         RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
120         RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
121         RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
122         RTE_RX_OFFLOAD_BIT2STR(SCATTER),
123         RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
124         RTE_RX_OFFLOAD_BIT2STR(SECURITY),
125         RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
126         RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
127         RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
128         RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
129         RTE_ETH_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
130 };
131
132 #undef RTE_RX_OFFLOAD_BIT2STR
133 #undef RTE_ETH_RX_OFFLOAD_BIT2STR
134
135 #define RTE_TX_OFFLOAD_BIT2STR(_name)   \
136         { DEV_TX_OFFLOAD_##_name, #_name }
137
138 static const struct {
139         uint64_t offload;
140         const char *name;
141 } eth_dev_tx_offload_names[] = {
142         RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
143         RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
144         RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
145         RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
146         RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
147         RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
148         RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
149         RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
150         RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
151         RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
152         RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
153         RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
154         RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
155         RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
156         RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
157         RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
158         RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
159         RTE_TX_OFFLOAD_BIT2STR(SECURITY),
160         RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
161         RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
162         RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
163         RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
164 };
165
166 #undef RTE_TX_OFFLOAD_BIT2STR
167
168 /**
169  * The user application callback description.
170  *
171  * It contains the callback address registered by the user application,
172  * the pointer to the callback's parameter, and the event type.
173  */
174 struct rte_eth_dev_callback {
175         TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
176         rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
177         void *cb_arg;                           /**< Parameter for callback */
178         void *ret_param;                        /**< Return parameter */
179         enum rte_eth_event_type event;          /**< Interrupt event type */
180         uint32_t active;                        /**< Callback is executing */
181 };
182
183 enum {
184         STAT_QMAP_TX = 0,
185         STAT_QMAP_RX
186 };
187
188 int
189 rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
190 {
191         int ret;
192         struct rte_devargs devargs;
193         const char *bus_param_key;
194         char *bus_str = NULL;
195         char *cls_str = NULL;
196         int str_size;
197
198         if (iter == NULL) {
199                 RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL iterator\n");
200                 return -EINVAL;
201         }
202
203         if (devargs_str == NULL) {
204                 RTE_ETHDEV_LOG(ERR,
205                         "Cannot initialize iterator from NULL device description string\n");
206                 return -EINVAL;
207         }
208
209         memset(iter, 0, sizeof(*iter));
210         memset(&devargs, 0, sizeof(devargs));
211
212         /*
213          * The devargs string may use various syntaxes:
214          *   - 0000:08:00.0,representor=[1-3]
215          *   - pci:0000:06:00.0,representor=[0,5]
216          *   - class=eth,mac=00:11:22:33:44:55
217          * A new syntax is in development (not yet supported):
218          *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
219          */
220
221         /*
222          * Handle pure class filter (i.e. without any bus-level argument),
223          * from future new syntax.
224          * rte_devargs_parse() does not yet support the new syntax,
225          * which is why this simple case is temporarily parsed here.
226          */
227 #define iter_anybus_str "class=eth,"
228         if (strncmp(devargs_str, iter_anybus_str,
229                         strlen(iter_anybus_str)) == 0) {
230                 iter->cls_str = devargs_str + strlen(iter_anybus_str);
231                 goto end;
232         }
233
234         /* Split bus, device and parameters. */
235         ret = rte_devargs_parse(&devargs, devargs_str);
236         if (ret != 0)
237                 goto error;
238
239         /*
240          * Assume parameters of old syntax can match only at ethdev level.
241          * Extra parameters will be ignored, thanks to the "+" prefix.
242          */
243         str_size = strlen(devargs.args) + 2;
244         cls_str = malloc(str_size);
245         if (cls_str == NULL) {
246                 ret = -ENOMEM;
247                 goto error;
248         }
249         ret = snprintf(cls_str, str_size, "+%s", devargs.args);
250         if (ret != str_size - 1) {
251                 ret = -EINVAL;
252                 goto error;
253         }
254         iter->cls_str = cls_str;
255
256         iter->bus = devargs.bus;
257         if (iter->bus->dev_iterate == NULL) {
258                 ret = -ENOTSUP;
259                 goto error;
260         }
261
262         /* Convert bus args to new syntax for use with new API dev_iterate. */
263         if (strcmp(iter->bus->name, "vdev") == 0) {
264                 bus_param_key = "name";
265         } else if (strcmp(iter->bus->name, "pci") == 0) {
266                 bus_param_key = "addr";
267         } else {
268                 ret = -ENOTSUP;
269                 goto error;
270         }
271         str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
272         bus_str = malloc(str_size);
273         if (bus_str == NULL) {
274                 ret = -ENOMEM;
275                 goto error;
276         }
277         ret = snprintf(bus_str, str_size, "%s=%s",
278                         bus_param_key, devargs.name);
279         if (ret != str_size - 1) {
280                 ret = -EINVAL;
281                 goto error;
282         }
283         iter->bus_str = bus_str;
284
285 end:
286         iter->cls = rte_class_find_by_name("eth");
287         rte_devargs_reset(&devargs);
288         return 0;
289
290 error:
291         if (ret == -ENOTSUP)
292                 RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n",
293                                 iter->bus->name);
294         rte_devargs_reset(&devargs);
295         free(bus_str);
296         free(cls_str);
297         return ret;
298 }
299
300 uint16_t
301 rte_eth_iterator_next(struct rte_dev_iterator *iter)
302 {
303         if (iter == NULL) {
304                 RTE_ETHDEV_LOG(ERR,
305                         "Cannot get next device from NULL iterator\n");
306                 return RTE_MAX_ETHPORTS;
307         }
308
309         if (iter->cls == NULL) /* invalid ethdev iterator */
310                 return RTE_MAX_ETHPORTS;
311
312         do { /* loop to try all matching rte_device */
313                 /* If not pure ethdev filter and */
314                 if (iter->bus != NULL &&
315                                 /* not in middle of rte_eth_dev iteration, */
316                                 iter->class_device == NULL) {
317                         /* get next rte_device to try. */
318                         iter->device = iter->bus->dev_iterate(
319                                         iter->device, iter->bus_str, iter);
320                         if (iter->device == NULL)
321                                 break; /* no more rte_device candidate */
322                 }
323                 /* A device is matching bus part, need to check ethdev part. */
324                 iter->class_device = iter->cls->dev_iterate(
325                                 iter->class_device, iter->cls_str, iter);
326                 if (iter->class_device != NULL)
327                         return eth_dev_to_id(iter->class_device); /* match */
328         } while (iter->bus != NULL); /* need to try next rte_device */
329
330         /* No more ethdev port to iterate. */
331         rte_eth_iterator_cleanup(iter);
332         return RTE_MAX_ETHPORTS;
333 }
334
335 void
336 rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
337 {
338         if (iter == NULL) {
339                 RTE_ETHDEV_LOG(ERR, "Cannot do cleanup from NULL iterator\n");
340                 return;
341         }
342
343         if (iter->bus_str == NULL)
344                 return; /* nothing to free in pure class filter */
345         free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
346         free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
347         memset(iter, 0, sizeof(*iter));
348 }
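
/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * file): walk all ethdev ports matching a devargs string with the three
 * iterator calls above. The helper name is hypothetical; applications can
 * also use the RTE_ETH_FOREACH_MATCHING_DEV() macro from rte_ethdev.h,
 * which expands to this same loop shape.
 */
static __rte_unused void
example_iterate_matching_ports(const char *devargs_str)
{
	struct rte_dev_iterator iterator;
	uint16_t port_id;

	if (rte_eth_iterator_init(&iterator, devargs_str) != 0)
		return;
	for (port_id = rte_eth_iterator_next(&iterator);
	     port_id != RTE_MAX_ETHPORTS;
	     port_id = rte_eth_iterator_next(&iterator))
		RTE_ETHDEV_LOG(INFO, "Matched port %u\n", port_id);
	/* rte_eth_iterator_next() cleans up after the last match, but an
	 * explicit cleanup is safe and needed if the loop exits early. */
	rte_eth_iterator_cleanup(&iterator);
}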
349
350 uint16_t
351 rte_eth_find_next(uint16_t port_id)
352 {
353         while (port_id < RTE_MAX_ETHPORTS &&
354                         rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
355                 port_id++;
356
357         if (port_id >= RTE_MAX_ETHPORTS)
358                 return RTE_MAX_ETHPORTS;
359
360         return port_id;
361 }
362
363 /*
364  * Macro to iterate over all valid ports for internal usage.
365  * Note: RTE_ETH_FOREACH_DEV differs in that it filters out owned ports.
366  */
367 #define RTE_ETH_FOREACH_VALID_DEV(port_id) \
368         for (port_id = rte_eth_find_next(0); \
369              port_id < RTE_MAX_ETHPORTS; \
370              port_id = rte_eth_find_next(port_id + 1))
371
372 uint16_t
373 rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
374 {
375         port_id = rte_eth_find_next(port_id);
376         while (port_id < RTE_MAX_ETHPORTS &&
377                         rte_eth_devices[port_id].device != parent)
378                 port_id = rte_eth_find_next(port_id + 1);
379
380         return port_id;
381 }
382
383 uint16_t
384 rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
385 {
386         RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
387         return rte_eth_find_next_of(port_id,
388                         rte_eth_devices[ref_port_id].device);
389 }
390
391 static void
392 eth_dev_shared_data_prepare(void)
393 {
394         const unsigned int flags = 0;
395         const struct rte_memzone *mz;
396
397         rte_spinlock_lock(&eth_dev_shared_data_lock);
398
399         if (eth_dev_shared_data == NULL) {
400                 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
401                         /* Allocate port data and ownership shared memory. */
402                         mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
403                                         sizeof(*eth_dev_shared_data),
404                                         rte_socket_id(), flags);
405                 } else
406                         mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
407                 if (mz == NULL)
408                         rte_panic("Cannot allocate ethdev shared data\n");
409
410                 eth_dev_shared_data = mz->addr;
411                 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
412                         eth_dev_shared_data->next_owner_id =
413                                         RTE_ETH_DEV_NO_OWNER + 1;
414                         rte_spinlock_init(&eth_dev_shared_data->ownership_lock);
415                         memset(eth_dev_shared_data->data, 0,
416                                sizeof(eth_dev_shared_data->data));
417                 }
418         }
419
420         rte_spinlock_unlock(&eth_dev_shared_data_lock);
421 }
422
423 static bool
424 eth_dev_is_allocated(const struct rte_eth_dev *ethdev)
425 {
426         return ethdev->data->name[0] != '\0';
427 }
428
429 static struct rte_eth_dev *
430 eth_dev_allocated(const char *name)
431 {
432         uint16_t i;
433
434         RTE_BUILD_BUG_ON(RTE_MAX_ETHPORTS >= UINT16_MAX);
435
436         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
437                 if (rte_eth_devices[i].data != NULL &&
438                     strcmp(rte_eth_devices[i].data->name, name) == 0)
439                         return &rte_eth_devices[i];
440         }
441         return NULL;
442 }
443
444 struct rte_eth_dev *
445 rte_eth_dev_allocated(const char *name)
446 {
447         struct rte_eth_dev *ethdev;
448
449         eth_dev_shared_data_prepare();
450
451         rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
452
453         ethdev = eth_dev_allocated(name);
454
455         rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
456
457         return ethdev;
458 }
459
460 static uint16_t
461 eth_dev_find_free_port(void)
462 {
463         uint16_t i;
464
465         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
466                 /* Using shared name field to find a free port. */
467                 if (eth_dev_shared_data->data[i].name[0] == '\0') {
468                         RTE_ASSERT(rte_eth_devices[i].state ==
469                                    RTE_ETH_DEV_UNUSED);
470                         return i;
471                 }
472         }
473         return RTE_MAX_ETHPORTS;
474 }
475
476 static struct rte_eth_dev *
477 eth_dev_get(uint16_t port_id)
478 {
479         struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
480
481         eth_dev->data = &eth_dev_shared_data->data[port_id];
482
483         return eth_dev;
484 }
485
486 struct rte_eth_dev *
487 rte_eth_dev_allocate(const char *name)
488 {
489         uint16_t port_id;
490         struct rte_eth_dev *eth_dev = NULL;
491         size_t name_len;
492
493         name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
494         if (name_len == 0) {
495                 RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n");
496                 return NULL;
497         }
498
499         if (name_len >= RTE_ETH_NAME_MAX_LEN) {
500                 RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n");
501                 return NULL;
502         }
503
504         eth_dev_shared_data_prepare();
505
506         /* Synchronize port creation between primary and secondary processes. */
507         rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
508
509         if (eth_dev_allocated(name) != NULL) {
510                 RTE_ETHDEV_LOG(ERR,
511                         "Ethernet device with name %s already allocated\n",
512                         name);
513                 goto unlock;
514         }
515
516         port_id = eth_dev_find_free_port();
517         if (port_id == RTE_MAX_ETHPORTS) {
518                 RTE_ETHDEV_LOG(ERR,
519                         "Reached maximum number of Ethernet ports\n");
520                 goto unlock;
521         }
522
523         eth_dev = eth_dev_get(port_id);
524         strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
525         eth_dev->data->port_id = port_id;
526         eth_dev->data->mtu = RTE_ETHER_MTU;
527         pthread_mutex_init(&eth_dev->data->flow_ops_mutex, NULL);
528
529 unlock:
530         rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
531
532         return eth_dev;
533 }
534
535 /*
536  * Attach to a port already registered by the primary process, which
537  * ensures that the same device has the same port ID in both the
538  * primary and secondary processes.
539  */
540 struct rte_eth_dev *
541 rte_eth_dev_attach_secondary(const char *name)
542 {
543         uint16_t i;
544         struct rte_eth_dev *eth_dev = NULL;
545
546         eth_dev_shared_data_prepare();
547
548         /* Synchronize port attachment to primary port creation and release. */
549         rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
550
551         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
552                 if (strcmp(eth_dev_shared_data->data[i].name, name) == 0)
553                         break;
554         }
555         if (i == RTE_MAX_ETHPORTS) {
556                 RTE_ETHDEV_LOG(ERR,
557                         "Device %s is not driven by the primary process\n",
558                         name);
559         } else {
560                 eth_dev = eth_dev_get(i);
561                 RTE_ASSERT(eth_dev->data->port_id == i);
562         }
563
564         rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
565         return eth_dev;
566 }
567
568 int
569 rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
570 {
571         if (eth_dev == NULL)
572                 return -EINVAL;
573
574         eth_dev_shared_data_prepare();
575
576         if (eth_dev->state != RTE_ETH_DEV_UNUSED)
577                 rte_eth_dev_callback_process(eth_dev,
578                                 RTE_ETH_EVENT_DESTROY, NULL);
579
580         rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
581
582         eth_dev->state = RTE_ETH_DEV_UNUSED;
583         eth_dev->device = NULL;
584         eth_dev->process_private = NULL;
585         eth_dev->intr_handle = NULL;
586         eth_dev->rx_pkt_burst = NULL;
587         eth_dev->tx_pkt_burst = NULL;
588         eth_dev->tx_pkt_prepare = NULL;
589         eth_dev->rx_queue_count = NULL;
590         eth_dev->rx_descriptor_done = NULL;
591         eth_dev->rx_descriptor_status = NULL;
592         eth_dev->tx_descriptor_status = NULL;
593         eth_dev->dev_ops = NULL;
594
595         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
596                 rte_free(eth_dev->data->rx_queues);
597                 rte_free(eth_dev->data->tx_queues);
598                 rte_free(eth_dev->data->mac_addrs);
599                 rte_free(eth_dev->data->hash_mac_addrs);
600                 rte_free(eth_dev->data->dev_private);
601                 pthread_mutex_destroy(&eth_dev->data->flow_ops_mutex);
602                 memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
603         }
604
605         rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
606
607         return 0;
608 }
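
/*
 * Minimal driver-side sketch (editorial addition, not part of the original
 * file): rte_eth_dev_allocate() and rte_eth_dev_release_port() above are
 * the PMD-facing pair. A hypothetical probe function would pair them as
 * follows; the name and the init_status parameter are illustrative only.
 */
static __rte_unused int
example_pmd_probe(const char *name, int init_status)
{
	struct rte_eth_dev *eth_dev;

	eth_dev = rte_eth_dev_allocate(name);
	if (eth_dev == NULL)
		return -ENOMEM;

	/* A real PMD would fill eth_dev->dev_ops, dev_private,
	 * MAC addresses, etc. here. */

	if (init_status != 0) {
		/* Return the port to the free pool on init failure. */
		rte_eth_dev_release_port(eth_dev);
		return init_status;
	}
	return 0;
}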
609
610 int
611 rte_eth_dev_is_valid_port(uint16_t port_id)
612 {
613         if (port_id >= RTE_MAX_ETHPORTS ||
614             (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
615                 return 0;
616         else
617                 return 1;
618 }
619
620 static int
621 eth_is_valid_owner_id(uint64_t owner_id)
622 {
623         if (owner_id == RTE_ETH_DEV_NO_OWNER ||
624             eth_dev_shared_data->next_owner_id <= owner_id)
625                 return 0;
626         return 1;
627 }
628
629 uint64_t
630 rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
631 {
632         port_id = rte_eth_find_next(port_id);
633         while (port_id < RTE_MAX_ETHPORTS &&
634                         rte_eth_devices[port_id].data->owner.id != owner_id)
635                 port_id = rte_eth_find_next(port_id + 1);
636
637         return port_id;
638 }
639
640 int
641 rte_eth_dev_owner_new(uint64_t *owner_id)
642 {
643         if (owner_id == NULL) {
644                 RTE_ETHDEV_LOG(ERR, "Cannot get new owner ID to NULL\n");
645                 return -EINVAL;
646         }
647
648         eth_dev_shared_data_prepare();
649
650         rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
651
652         *owner_id = eth_dev_shared_data->next_owner_id++;
653
654         rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
655         return 0;
656 }
657
658 static int
659 eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
660                        const struct rte_eth_dev_owner *new_owner)
661 {
662         struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
663         struct rte_eth_dev_owner *port_owner;
664
665         if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
666                 RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
667                         port_id);
668                 return -ENODEV;
669         }
670
671         if (new_owner == NULL) {
672                 RTE_ETHDEV_LOG(ERR,
673                         "Cannot set ethdev port %u owner from NULL owner\n",
674                         port_id);
675                 return -EINVAL;
676         }
677
678         if (!eth_is_valid_owner_id(new_owner->id) &&
679             !eth_is_valid_owner_id(old_owner_id)) {
680                 RTE_ETHDEV_LOG(ERR,
681                         "Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
682                        old_owner_id, new_owner->id);
683                 return -EINVAL;
684         }
685
686         port_owner = &rte_eth_devices[port_id].data->owner;
687         if (port_owner->id != old_owner_id) {
688                 RTE_ETHDEV_LOG(ERR,
689                         "Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
690                         port_id, port_owner->name, port_owner->id);
691                 return -EPERM;
692         }
693
694         /* cannot truncate: both name fields have the same size */
695         strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);
696
697         port_owner->id = new_owner->id;
698
699         RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
700                 port_id, new_owner->name, new_owner->id);
701
702         return 0;
703 }
704
705 int
706 rte_eth_dev_owner_set(const uint16_t port_id,
707                       const struct rte_eth_dev_owner *owner)
708 {
709         int ret;
710
711         eth_dev_shared_data_prepare();
712
713         rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
714
715         ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);
716
717         rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
718         return ret;
719 }
720
721 int
722 rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
723 {
724         const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
725                         {.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
726         int ret;
727
728         eth_dev_shared_data_prepare();
729
730         rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
731
732         ret = eth_dev_owner_set(port_id, owner_id, &new_owner);
733
734         rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
735         return ret;
736 }
737
738 int
739 rte_eth_dev_owner_delete(const uint64_t owner_id)
740 {
741         uint16_t port_id;
742         int ret = 0;
743
744         eth_dev_shared_data_prepare();
745
746         rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
747
748         if (eth_is_valid_owner_id(owner_id)) {
749                 for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
750                         if (rte_eth_devices[port_id].data->owner.id == owner_id)
751                                 memset(&rte_eth_devices[port_id].data->owner, 0,
752                                        sizeof(struct rte_eth_dev_owner));
753                 RTE_ETHDEV_LOG(NOTICE,
754                 "All port ownerships with owner identifier %016"PRIx64" have been removed\n",
755                         owner_id);
756         } else {
757                 RTE_ETHDEV_LOG(ERR,
758                                "Invalid owner id=%016"PRIx64"\n",
759                                owner_id);
760                 ret = -EINVAL;
761         }
762
763         rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
764
765         return ret;
766 }
767
768 int
769 rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
770 {
771         struct rte_eth_dev *ethdev;
772
773         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
774         ethdev = &rte_eth_devices[port_id];
775
776         if (!eth_dev_is_allocated(ethdev)) {
777                 RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
778                         port_id);
779                 return -ENODEV;
780         }
781
782         if (owner == NULL) {
783                 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u owner to NULL\n",
784                         port_id);
785                 return -EINVAL;
786         }
787
788         eth_dev_shared_data_prepare();
789
790         rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
791         rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
792         rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
793
794         return 0;
795 }
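
/*
 * Illustrative ownership sketch (editorial addition, not part of the
 * original file): a component takes a fresh owner ID, claims a port with
 * it, and drops the claim when done. The helper name and owner name are
 * hypothetical.
 */
static __rte_unused int
example_claim_port(uint16_t port_id)
{
	struct rte_eth_dev_owner owner = { .name = "example" };
	int ret;

	ret = rte_eth_dev_owner_new(&owner.id);
	if (ret != 0)
		return ret;
	ret = rte_eth_dev_owner_set(port_id, &owner);
	if (ret != 0)
		return ret;
	/* ... exclusive use of the port ... */
	return rte_eth_dev_owner_unset(port_id, owner.id);
}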
796
797 int
798 rte_eth_dev_socket_id(uint16_t port_id)
799 {
800         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
801         return rte_eth_devices[port_id].data->numa_node;
802 }
803
804 void *
805 rte_eth_dev_get_sec_ctx(uint16_t port_id)
806 {
807         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
808         return rte_eth_devices[port_id].security_ctx;
809 }
810
811 uint16_t
812 rte_eth_dev_count_avail(void)
813 {
814         uint16_t p;
815         uint16_t count;
816
817         count = 0;
818
819         RTE_ETH_FOREACH_DEV(p)
820                 count++;
821
822         return count;
823 }
824
825 uint16_t
826 rte_eth_dev_count_total(void)
827 {
828         uint16_t port, count = 0;
829
830         RTE_ETH_FOREACH_VALID_DEV(port)
831                 count++;
832
833         return count;
834 }
835
836 int
837 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
838 {
839         char *tmp;
840
841         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
842
843         if (name == NULL) {
844                 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u name to NULL\n",
845                         port_id);
846                 return -EINVAL;
847         }
848
849         /* Do not check 'rte_eth_devices[port_id].data' here,
850          * because it might be overwritten by a vdev PMD. */
851         tmp = eth_dev_shared_data->data[port_id].name;
852         strcpy(name, tmp);
853         return 0;
854 }
855
856 int
857 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
858 {
859         uint16_t pid;
860
861         if (name == NULL) {
862                 RTE_ETHDEV_LOG(ERR, "Cannot get port ID from NULL name\n");
863                 return -EINVAL;
864         }
865
866         if (port_id == NULL) {
867                 RTE_ETHDEV_LOG(ERR,
868                         "Cannot get port ID to NULL for %s\n", name);
869                 return -EINVAL;
870         }
871
872         RTE_ETH_FOREACH_VALID_DEV(pid)
873                 if (!strcmp(name, eth_dev_shared_data->data[pid].name)) {
874                         *port_id = pid;
875                         return 0;
876                 }
877
878         return -ENODEV;
879 }
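
/*
 * Illustrative round-trip sketch (editorial addition, not part of the
 * original file): resolve a device name to its port ID and back with the
 * two lookups above. The caller's buffer must hold at least
 * RTE_ETH_NAME_MAX_LEN bytes; the helper name is hypothetical.
 */
static __rte_unused int
example_name_roundtrip(const char *name)
{
	char buf[RTE_ETH_NAME_MAX_LEN];
	uint16_t port_id;
	int ret;

	ret = rte_eth_dev_get_port_by_name(name, &port_id);
	if (ret != 0)
		return ret; /* -ENODEV when no port carries this name */
	return rte_eth_dev_get_name_by_port(port_id, buf);
}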
880
881 static int
882 eth_err(uint16_t port_id, int ret)
883 {
884         if (ret == 0)
885                 return 0;
886         if (rte_eth_dev_is_removed(port_id))
887                 return -EIO;
888         return ret;
889 }
890
891 static int
892 eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
893 {
894         uint16_t old_nb_queues = dev->data->nb_rx_queues;
895         void **rxq;
896         unsigned int i;
897
898         if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
899                 dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
900                                 sizeof(dev->data->rx_queues[0]) * nb_queues,
901                                 RTE_CACHE_LINE_SIZE);
902                 if (dev->data->rx_queues == NULL) {
903                         dev->data->nb_rx_queues = 0;
904                         return -ENOMEM;
905                 }
906         } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
907                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
908
909                 rxq = dev->data->rx_queues;
910
911                 for (i = nb_queues; i < old_nb_queues; i++)
912                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
913                 rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
914                                 RTE_CACHE_LINE_SIZE);
915                 if (rxq == NULL)
916                         return -ENOMEM;
917                 if (nb_queues > old_nb_queues) {
918                         uint16_t new_qs = nb_queues - old_nb_queues;
919
920                         memset(rxq + old_nb_queues, 0,
921                                 sizeof(rxq[0]) * new_qs);
922                 }
923
924                 dev->data->rx_queues = rxq;
925
926         } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
927                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
928
929                 rxq = dev->data->rx_queues;
930
931                 for (i = nb_queues; i < old_nb_queues; i++)
932                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
933
934                 rte_free(dev->data->rx_queues);
935                 dev->data->rx_queues = NULL;
936         }
937         dev->data->nb_rx_queues = nb_queues;
938         return 0;
939 }
940
941 static int
942 eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
943 {
944         uint16_t port_id;
945
946         if (rx_queue_id >= dev->data->nb_rx_queues) {
947                 port_id = dev->data->port_id;
948                 RTE_ETHDEV_LOG(ERR,
949                                "Invalid Rx queue_id=%u of device with port_id=%u\n",
950                                rx_queue_id, port_id);
951                 return -EINVAL;
952         }
953
954         if (dev->data->rx_queues[rx_queue_id] == NULL) {
955                 port_id = dev->data->port_id;
956                 RTE_ETHDEV_LOG(ERR,
957                                "Queue %u of device with port_id=%u has not been setup\n",
958                                rx_queue_id, port_id);
959                 return -EINVAL;
960         }
961
962         return 0;
963 }
964
965 static int
966 eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id)
967 {
968         uint16_t port_id;
969
970         if (tx_queue_id >= dev->data->nb_tx_queues) {
971                 port_id = dev->data->port_id;
972                 RTE_ETHDEV_LOG(ERR,
973                                "Invalid Tx queue_id=%u of device with port_id=%u\n",
974                                tx_queue_id, port_id);
975                 return -EINVAL;
976         }
977
978         if (dev->data->tx_queues[tx_queue_id] == NULL) {
979                 port_id = dev->data->port_id;
980                 RTE_ETHDEV_LOG(ERR,
981                                "Queue %u of device with port_id=%u has not been setup\n",
982                                tx_queue_id, port_id);
983                 return -EINVAL;
984         }
985
986         return 0;
987 }
988
989 int
990 rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
991 {
992         struct rte_eth_dev *dev;
993         int ret;
994
995         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
996         dev = &rte_eth_devices[port_id];
997
998         if (!dev->data->dev_started) {
999                 RTE_ETHDEV_LOG(ERR,
1000                         "Port %u must be started before starting any queue\n",
1001                         port_id);
1002                 return -EINVAL;
1003         }
1004
1005         ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
1006         if (ret != 0)
1007                 return ret;
1008
1009         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
1010
1011         if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
1012                 RTE_ETHDEV_LOG(INFO,
1013                         "Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
1014                         rx_queue_id, port_id);
1015                 return -EINVAL;
1016         }
1017
1018         if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
1019                 RTE_ETHDEV_LOG(INFO,
1020                         "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
1021                         rx_queue_id, port_id);
1022                 return 0;
1023         }
1024
1025         return eth_err(port_id, dev->dev_ops->rx_queue_start(dev, rx_queue_id));
1026 }
1027
1028 int
1029 rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
1030 {
1031         struct rte_eth_dev *dev;
1032         int ret;
1033
1034         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1035         dev = &rte_eth_devices[port_id];
1036
1037         ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
1038         if (ret != 0)
1039                 return ret;
1040
1041         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
1042
1043         if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
1044                 RTE_ETHDEV_LOG(INFO,
1045                         "Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
1046                         rx_queue_id, port_id);
1047                 return -EINVAL;
1048         }
1049
1050         if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
1051                 RTE_ETHDEV_LOG(INFO,
1052                         "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
1053                         rx_queue_id, port_id);
1054                 return 0;
1055         }
1056
1057         return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
1058 }
1059
1060 int
1061 rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
1062 {
1063         struct rte_eth_dev *dev;
1064         int ret;
1065
1066         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1067         dev = &rte_eth_devices[port_id];
1068
1069         if (!dev->data->dev_started) {
1070                 RTE_ETHDEV_LOG(ERR,
1071                         "Port %u must be started before starting any queue\n",
1072                         port_id);
1073                 return -EINVAL;
1074         }
1075
1076         ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
1077         if (ret != 0)
1078                 return ret;
1079
1080         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
1081
1082         if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
1083                 RTE_ETHDEV_LOG(INFO,
1084                         "Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
1085                         tx_queue_id, port_id);
1086                 return -EINVAL;
1087         }
1088
1089         if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
1090                 RTE_ETHDEV_LOG(INFO,
1091                         "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
1092                         tx_queue_id, port_id);
1093                 return 0;
1094         }
1095
1096         return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
1097 }
1098
1099 int
1100 rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
1101 {
1102         struct rte_eth_dev *dev;
1103         int ret;
1104
1105         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1106         dev = &rte_eth_devices[port_id];
1107
1108         ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
1109         if (ret != 0)
1110                 return ret;
1111
1112         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
1113
1114         if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
1115                 RTE_ETHDEV_LOG(INFO,
1116                         "Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
1117                         tx_queue_id, port_id);
1118                 return -EINVAL;
1119         }
1120
1121         if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
1122                 RTE_ETHDEV_LOG(INFO,
1123                         "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
1124                         tx_queue_id, port_id);
1125                 return 0;
1126         }
1127
1128         return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
1129 }
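
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * the four per-queue start/stop wrappers above are typically used with
 * queues configured with rx_deferred_start/tx_deferred_start, which the
 * port-level start leaves stopped. The helper name is hypothetical and the
 * queue_id is assumed to be a valid, already set up queue.
 */
static __rte_unused int
example_start_deferred_queues(uint16_t port_id, uint16_t queue_id)
{
	int ret;

	ret = rte_eth_dev_rx_queue_start(port_id, queue_id);
	if (ret != 0)
		return ret;
	ret = rte_eth_dev_tx_queue_start(port_id, queue_id);
	if (ret != 0)
		rte_eth_dev_rx_queue_stop(port_id, queue_id);
	return ret;
}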
1130
1131 static int
1132 eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
1133 {
1134         uint16_t old_nb_queues = dev->data->nb_tx_queues;
1135         void **txq;
1136         unsigned int i;
1137
1138         if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
1139                 dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
1140                                                    sizeof(dev->data->tx_queues[0]) * nb_queues,
1141                                                    RTE_CACHE_LINE_SIZE);
1142                 if (dev->data->tx_queues == NULL) {
1143                         dev->data->nb_tx_queues = 0;
1144                         return -ENOMEM;
1145                 }
1146         } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
1147                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
1148
1149                 txq = dev->data->tx_queues;
1150
1151                 for (i = nb_queues; i < old_nb_queues; i++)
1152                         (*dev->dev_ops->tx_queue_release)(txq[i]);
1153                 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
1154                                   RTE_CACHE_LINE_SIZE);
1155                 if (txq == NULL)
1156                         return -ENOMEM;
1157                 if (nb_queues > old_nb_queues) {
1158                         uint16_t new_qs = nb_queues - old_nb_queues;
1159
1160                         memset(txq + old_nb_queues, 0,
1161                                sizeof(txq[0]) * new_qs);
1162                 }
1163
1164                 dev->data->tx_queues = txq;
1165
1166         } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
1167                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
1168
1169                 txq = dev->data->tx_queues;
1170
1171                 for (i = nb_queues; i < old_nb_queues; i++)
1172                         (*dev->dev_ops->tx_queue_release)(txq[i]);
1173
1174                 rte_free(dev->data->tx_queues);
1175                 dev->data->tx_queues = NULL;
1176         }
1177         dev->data->nb_tx_queues = nb_queues;
1178         return 0;
1179 }
1180
1181 uint32_t
1182 rte_eth_speed_bitflag(uint32_t speed, int duplex)
1183 {
1184         switch (speed) {
1185         case ETH_SPEED_NUM_10M:
1186                 return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
1187         case ETH_SPEED_NUM_100M:
1188                 return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
1189         case ETH_SPEED_NUM_1G:
1190                 return ETH_LINK_SPEED_1G;
1191         case ETH_SPEED_NUM_2_5G:
1192                 return ETH_LINK_SPEED_2_5G;
1193         case ETH_SPEED_NUM_5G:
1194                 return ETH_LINK_SPEED_5G;
1195         case ETH_SPEED_NUM_10G:
1196                 return ETH_LINK_SPEED_10G;
1197         case ETH_SPEED_NUM_20G:
1198                 return ETH_LINK_SPEED_20G;
1199         case ETH_SPEED_NUM_25G:
1200                 return ETH_LINK_SPEED_25G;
1201         case ETH_SPEED_NUM_40G:
1202                 return ETH_LINK_SPEED_40G;
1203         case ETH_SPEED_NUM_50G:
1204                 return ETH_LINK_SPEED_50G;
1205         case ETH_SPEED_NUM_56G:
1206                 return ETH_LINK_SPEED_56G;
1207         case ETH_SPEED_NUM_100G:
1208                 return ETH_LINK_SPEED_100G;
1209         case ETH_SPEED_NUM_200G:
1210                 return ETH_LINK_SPEED_200G;
1211         default:
1212                 return 0;
1213         }
1214 }
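
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * rte_eth_speed_bitflag() turns a numeric speed plus a duplex flag into
 * the matching ETH_LINK_SPEED_* bit, e.g. to request a fixed link speed
 * at configure time. The helper name is hypothetical.
 */
static __rte_unused void
example_fix_link_speed(struct rte_eth_conf *conf)
{
	conf->link_speeds = ETH_LINK_SPEED_FIXED |
		rte_eth_speed_bitflag(ETH_SPEED_NUM_10G, ETH_LINK_FULL_DUPLEX);
}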
1215
1216 const char *
1217 rte_eth_dev_rx_offload_name(uint64_t offload)
1218 {
1219         const char *name = "UNKNOWN";
1220         unsigned int i;
1221
1222         for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i) {
1223                 if (offload == eth_dev_rx_offload_names[i].offload) {
1224                         name = eth_dev_rx_offload_names[i].name;
1225                         break;
1226                 }
1227         }
1228
1229         return name;
1230 }
1231
1232 const char *
1233 rte_eth_dev_tx_offload_name(uint64_t offload)
1234 {
1235         const char *name = "UNKNOWN";
1236         unsigned int i;
1237
1238         for (i = 0; i < RTE_DIM(eth_dev_tx_offload_names); ++i) {
1239                 if (offload == eth_dev_tx_offload_names[i].offload) {
1240                         name = eth_dev_tx_offload_names[i].name;
1241                         break;
1242                 }
1243         }
1244
1245         return name;
1246 }
1247
1248 static inline int
1249 eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
1250                    uint32_t max_rx_pkt_len, uint32_t dev_info_size)
1251 {
1252         int ret = 0;
1253
1254         if (dev_info_size == 0) {
1255                 if (config_size != max_rx_pkt_len) {
1256                         RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u max_lro_pkt_size"
1257                                        " %u != %u is not allowed\n",
1258                                        port_id, config_size, max_rx_pkt_len);
1259                         ret = -EINVAL;
1260                 }
1261         } else if (config_size > dev_info_size) {
1262                 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u max_lro_pkt_size %u "
1263                                "> max allowed value %u\n", port_id, config_size,
1264                                dev_info_size);
1265                 ret = -EINVAL;
1266         } else if (config_size < RTE_ETHER_MIN_LEN) {
1267                 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u max_lro_pkt_size %u "
1268                                "< min allowed value %u\n", port_id, config_size,
1269                                (unsigned int)RTE_ETHER_MIN_LEN);
1270                 ret = -EINVAL;
1271         }
1272         return ret;
1273 }
1274
1275 /*
1276  * Validate offloads that are requested through rte_eth_dev_configure against
1277  * the offloads successfully set by the ethernet device.
1278  *
1279  * @param port_id
1280  *   The port identifier of the Ethernet device.
1281  * @param req_offloads
1282  *   The offloads that have been requested through `rte_eth_dev_configure`.
1283  * @param set_offloads
1284  *   The offloads successfully set by the ethernet device.
1285  * @param offload_type
1286  *   The offload type, i.e. the Rx/Tx string.
1287  * @param offload_name
1288  *   The function that prints the offload name.
1289  * @return
1290  *   - (0) if validation successful.
1291  *   - (-EINVAL) if requested offload has been silently disabled.
1292  *
1293  */
1294 static int
1295 eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads,
1296                   uint64_t set_offloads, const char *offload_type,
1297                   const char *(*offload_name)(uint64_t))
1298 {
1299         uint64_t offloads_diff = req_offloads ^ set_offloads;
1300         uint64_t offload;
1301         int ret = 0;
1302
1303         while (offloads_diff != 0) {
1304                 /* Check if any offload is requested but not enabled. */
1305                 offload = 1ULL << __builtin_ctzll(offloads_diff);
1306                 if (offload & req_offloads) {
1307                         RTE_ETHDEV_LOG(ERR,
1308                                 "Port %u failed to enable %s offload %s\n",
1309                                 port_id, offload_type, offload_name(offload));
1310                         ret = -EINVAL;
1311                 }
1312
1313                 /* Check if offload couldn't be disabled. */
1314                 if (offload & set_offloads) {
1315                         RTE_ETHDEV_LOG(DEBUG,
1316                                 "Port %u %s offload %s is not requested but enabled\n",
1317                                 port_id, offload_type, offload_name(offload));
1318                 }
1319
1320                 offloads_diff &= ~offload;
1321         }
1322
1323         return ret;
1324 }
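
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * the same bit-peeling loop as eth_dev_validate_offloads() above, shown
 * standalone. Each iteration isolates the lowest set bit with
 * __builtin_ctzll() and then clears it, so every offload in the mask is
 * reported exactly once. The helper name is hypothetical.
 */
static __rte_unused void
example_log_rx_offload_mask(uint16_t port_id, uint64_t mask)
{
	while (mask != 0) {
		uint64_t offload = 1ULL << __builtin_ctzll(mask);

		RTE_ETHDEV_LOG(INFO, "Port %u Rx offload %s\n", port_id,
			rte_eth_dev_rx_offload_name(offload));
		mask &= ~offload;
	}
}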
1325
1326 int
1327 rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
1328                       const struct rte_eth_conf *dev_conf)
1329 {
1330         struct rte_eth_dev *dev;
1331         struct rte_eth_dev_info dev_info;
1332         struct rte_eth_conf orig_conf;
1333         uint16_t overhead_len;
1334         int diag;
1335         int ret;
1336         uint16_t old_mtu;
1337
1338         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1339         dev = &rte_eth_devices[port_id];
1340
1341         if (dev_conf == NULL) {
1342                 RTE_ETHDEV_LOG(ERR,
1343                         "Cannot configure ethdev port %u from NULL config\n",
1344                         port_id);
1345                 return -EINVAL;
1346         }
1347
1348         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
1349
1350         if (dev->data->dev_started) {
1351                 RTE_ETHDEV_LOG(ERR,
1352                         "Port %u must be stopped to allow configuration\n",
1353                         port_id);
1354                 return -EBUSY;
1355         }
1356
1357         /* Store the original config, as a rollback is required on failure */
1358         memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));
1359
1360         /*
1361          * Copy the dev_conf parameter into the dev structure.
1362          * rte_eth_dev_info_get() requires dev_conf, so copy it before that call.
1363          */
1364         if (dev_conf != &dev->data->dev_conf)
1365                 memcpy(&dev->data->dev_conf, dev_conf,
1366                        sizeof(dev->data->dev_conf));
1367
1368         /* Backup mtu for rollback */
1369         old_mtu = dev->data->mtu;
1370
1371         ret = rte_eth_dev_info_get(port_id, &dev_info);
1372         if (ret != 0)
1373                 goto rollback;
1374
1375         /* Get the real Ethernet overhead length */
1376         if (dev_info.max_mtu != UINT16_MAX &&
1377             dev_info.max_rx_pktlen > dev_info.max_mtu)
1378                 overhead_len = dev_info.max_rx_pktlen - dev_info.max_mtu;
1379         else
1380                 overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
1381
1382         /* If number of queues specified by application for both Rx and Tx is
1383          * zero, use driver preferred values. This cannot be done individually
1384          * as it is valid for either Tx or Rx (but not both) to be zero.
1385          * If the driver does not provide any preferred values, fall back on
1386          * EAL defaults.
1387          */
1388         if (nb_rx_q == 0 && nb_tx_q == 0) {
1389                 nb_rx_q = dev_info.default_rxportconf.nb_queues;
1390                 if (nb_rx_q == 0)
1391                         nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
1392                 nb_tx_q = dev_info.default_txportconf.nb_queues;
1393                 if (nb_tx_q == 0)
1394                         nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
1395         }
1396
1397         if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
1398                 RTE_ETHDEV_LOG(ERR,
1399                         "Number of RX queues requested (%u) is greater than max supported (%d)\n",
1400                         nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
1401                 ret = -EINVAL;
1402                 goto rollback;
1403         }
1404
1405         if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
1406                 RTE_ETHDEV_LOG(ERR,
1407                         "Number of TX queues requested (%u) is greater than max supported (%d)\n",
1408                         nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
1409                 ret = -EINVAL;
1410                 goto rollback;
1411         }
1412
1413         /*
1414          * Check that the numbers of RX and TX queues are not greater
1415          * than the maximum number of RX and TX queues supported by the
1416          * configured device.
1417          */
1418         if (nb_rx_q > dev_info.max_rx_queues) {
1419                 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
1420                         port_id, nb_rx_q, dev_info.max_rx_queues);
1421                 ret = -EINVAL;
1422                 goto rollback;
1423         }
1424
1425         if (nb_tx_q > dev_info.max_tx_queues) {
1426                 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
1427                         port_id, nb_tx_q, dev_info.max_tx_queues);
1428                 ret = -EINVAL;
1429                 goto rollback;
1430         }
1431
1432         /* Check that the device supports requested interrupts */
1433         if ((dev_conf->intr_conf.lsc == 1) &&
1434                         (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
1435                 RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
1436                         dev->device->driver->name);
1437                 ret = -EINVAL;
1438                 goto rollback;
1439         }
1440         if ((dev_conf->intr_conf.rmv == 1) &&
1441                         (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
1442                 RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
1443                         dev->device->driver->name);
1444                 ret = -EINVAL;
1445                 goto rollback;
1446         }
1447
1448         /*
1449          * If jumbo frames are enabled, check that the maximum RX packet
1450          * length is supported by the configured device.
1451          */
1452         if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
1453                 if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) {
1454                         RTE_ETHDEV_LOG(ERR,
1455                                 "Ethdev port_id=%u max_rx_pkt_len %u > max valid value %u\n",
1456                                 port_id, dev_conf->rxmode.max_rx_pkt_len,
1457                                 dev_info.max_rx_pktlen);
1458                         ret = -EINVAL;
1459                         goto rollback;
1460                 } else if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN) {
1461                         RTE_ETHDEV_LOG(ERR,
1462                                 "Ethdev port_id=%u max_rx_pkt_len %u < min valid value %u\n",
1463                                 port_id, dev_conf->rxmode.max_rx_pkt_len,
1464                                 (unsigned int)RTE_ETHER_MIN_LEN);
1465                         ret = -EINVAL;
1466                         goto rollback;
1467                 }
1468
1469                 /* Scale the MTU size to adapt max_rx_pkt_len */
1470                 dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
1471                                 overhead_len;
1472         } else {
1473                 uint16_t pktlen = dev_conf->rxmode.max_rx_pkt_len;
1474                 if (pktlen < RTE_ETHER_MIN_MTU + overhead_len ||
1475                     pktlen > RTE_ETHER_MTU + overhead_len)
1476                         /* Use default value */
1477                         dev->data->dev_conf.rxmode.max_rx_pkt_len =
1478                                                 RTE_ETHER_MTU + overhead_len;
1479         }
1480
1481         /*
1482          * If LRO is enabled, check that the maximum aggregated packet
1483          * size is supported by the configured device.
1484          */
1485         if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
1486                 if (dev_conf->rxmode.max_lro_pkt_size == 0)
1487                         dev->data->dev_conf.rxmode.max_lro_pkt_size =
1488                                 dev->data->dev_conf.rxmode.max_rx_pkt_len;
1489                 ret = eth_dev_check_lro_pkt_size(port_id,
1490                                 dev->data->dev_conf.rxmode.max_lro_pkt_size,
1491                                 dev->data->dev_conf.rxmode.max_rx_pkt_len,
1492                                 dev_info.max_lro_pkt_size);
1493                 if (ret != 0)
1494                         goto rollback;
1495         }
1496
1497         /* Any requested offloading must be within its device capabilities */
1498         if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
1499              dev_conf->rxmode.offloads) {
1500                 RTE_ETHDEV_LOG(ERR,
1501                         "Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" don't match Rx offloads "
1502                         "capabilities 0x%"PRIx64" in %s()\n",
1503                         port_id, dev_conf->rxmode.offloads,
1504                         dev_info.rx_offload_capa,
1505                         __func__);
1506                 ret = -EINVAL;
1507                 goto rollback;
1508         }
1509         if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
1510              dev_conf->txmode.offloads) {
1511                 RTE_ETHDEV_LOG(ERR,
1512                         "Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" don't match Tx offloads "
1513                         "capabilities 0x%"PRIx64" in %s()\n",
1514                         port_id, dev_conf->txmode.offloads,
1515                         dev_info.tx_offload_capa,
1516                         __func__);
1517                 ret = -EINVAL;
1518                 goto rollback;
1519         }
1520
1521         dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
1522                 rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);
1523
1524         /* Check that device supports requested rss hash functions. */
1525         if ((dev_info.flow_type_rss_offloads |
1526              dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
1527             dev_info.flow_type_rss_offloads) {
1528                 RTE_ETHDEV_LOG(ERR,
1529                         "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
1530                         port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
1531                         dev_info.flow_type_rss_offloads);
1532                 ret = -EINVAL;
1533                 goto rollback;
1534         }
1535
1536         /* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
1537         if (((dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) == 0) &&
1538             (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
1539                 RTE_ETHDEV_LOG(ERR,
1540                         "Ethdev port_id=%u config invalid: Rx mq_mode without RSS but %s offload is requested\n",
1541                         port_id,
1542                         rte_eth_dev_rx_offload_name(DEV_RX_OFFLOAD_RSS_HASH));
1543                 ret = -EINVAL;
1544                 goto rollback;
1545         }
1546
1547         /*
1548          * Set up the new number of Rx/Tx queues and reconfigure the device.
1549          */
1550         diag = eth_dev_rx_queue_config(dev, nb_rx_q);
1551         if (diag != 0) {
1552                 RTE_ETHDEV_LOG(ERR,
1553                         "Port%u eth_dev_rx_queue_config = %d\n",
1554                         port_id, diag);
1555                 ret = diag;
1556                 goto rollback;
1557         }
1558
1559         diag = eth_dev_tx_queue_config(dev, nb_tx_q);
1560         if (diag != 0) {
1561                 RTE_ETHDEV_LOG(ERR,
1562                         "Port%u eth_dev_tx_queue_config = %d\n",
1563                         port_id, diag);
1564                 eth_dev_rx_queue_config(dev, 0);
1565                 ret = diag;
1566                 goto rollback;
1567         }
1568
1569         diag = (*dev->dev_ops->dev_configure)(dev);
1570         if (diag != 0) {
1571                 RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
1572                         port_id, diag);
1573                 ret = eth_err(port_id, diag);
1574                 goto reset_queues;
1575         }
1576
1577         /* Initialize Rx profiling if enabled at compilation time. */
1578         diag = __rte_eth_dev_profile_init(port_id, dev);
1579         if (diag != 0) {
1580                 RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
1581                         port_id, diag);
1582                 ret = eth_err(port_id, diag);
1583                 goto reset_queues;
1584         }
1585
1586         /* Validate Rx offloads. */
1587         diag = eth_dev_validate_offloads(port_id,
1588                         dev_conf->rxmode.offloads,
1589                         dev->data->dev_conf.rxmode.offloads, "Rx",
1590                         rte_eth_dev_rx_offload_name);
1591         if (diag != 0) {
1592                 ret = diag;
1593                 goto reset_queues;
1594         }
1595
1596         /* Validate Tx offloads. */
1597         diag = eth_dev_validate_offloads(port_id,
1598                         dev_conf->txmode.offloads,
1599                         dev->data->dev_conf.txmode.offloads, "Tx",
1600                         rte_eth_dev_tx_offload_name);
1601         if (diag != 0) {
1602                 ret = diag;
1603                 goto reset_queues;
1604         }
1605
1606         rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0);
1607         return 0;
1608 reset_queues:
1609         eth_dev_rx_queue_config(dev, 0);
1610         eth_dev_tx_queue_config(dev, 0);
1611 rollback:
1612         memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
1613         if (old_mtu != dev->data->mtu)
1614                 dev->data->mtu = old_mtu;
1615
1616         rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret);
1617         return ret;
1618 }
1619
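     /*
      * Usage sketch (illustrative only, not library code): a minimal
      * configuration call that stays within the capabilities checked
      * above. port_id is assumed to be a valid, stopped port.
      *
      *     struct rte_eth_conf conf = {
      *             .rxmode = { .mq_mode = ETH_MQ_RX_NONE },
      *     };
      *     struct rte_eth_dev_info info;
      *     int ret = rte_eth_dev_info_get(port_id, &info);
      *
      *     if (ret == 0) {
      *             conf.rxmode.offloads =
      *                     DEV_RX_OFFLOAD_CHECKSUM & info.rx_offload_capa;
      *             ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
      *     }
      */
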
1620 void
1621 rte_eth_dev_internal_reset(struct rte_eth_dev *dev)
1622 {
1623         if (dev->data->dev_started) {
1624                 RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
1625                         dev->data->port_id);
1626                 return;
1627         }
1628
1629         eth_dev_rx_queue_config(dev, 0);
1630         eth_dev_tx_queue_config(dev, 0);
1631
1632         memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
1633 }
1634
1635 static void
1636 eth_dev_mac_restore(struct rte_eth_dev *dev,
1637                         struct rte_eth_dev_info *dev_info)
1638 {
1639         struct rte_ether_addr *addr;
1640         uint16_t i;
1641         uint32_t pool = 0;
1642         uint64_t pool_mask;
1643
1644         /* replay MAC address configuration including default MAC */
1645         addr = &dev->data->mac_addrs[0];
1646         if (*dev->dev_ops->mac_addr_set != NULL)
1647                 (*dev->dev_ops->mac_addr_set)(dev, addr);
1648         else if (*dev->dev_ops->mac_addr_add != NULL)
1649                 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);
1650
1651         if (*dev->dev_ops->mac_addr_add != NULL) {
1652                 for (i = 1; i < dev_info->max_mac_addrs; i++) {
1653                         addr = &dev->data->mac_addrs[i];
1654
1655                         /* skip zero address */
1656                         if (rte_is_zero_ether_addr(addr))
1657                                 continue;
1658
1659                         pool = 0;
1660                         pool_mask = dev->data->mac_pool_sel[i];
1661
1662                         do {
1663                                 if (pool_mask & 1ULL)
1664                                         (*dev->dev_ops->mac_addr_add)(dev,
1665                                                 addr, i, pool);
1666                                 pool_mask >>= 1;
1667                                 pool++;
1668                         } while (pool_mask);
1669                 }
1670         }
1671 }
1672
1673 static int
1674 eth_dev_config_restore(struct rte_eth_dev *dev,
1675                 struct rte_eth_dev_info *dev_info, uint16_t port_id)
1676 {
1677         int ret;
1678
1679         if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
1680                 eth_dev_mac_restore(dev, dev_info);
1681
1682         /* replay promiscuous configuration */
1683         /*
1684          * use callbacks directly since we don't need the port_id check and
1685          * want to bypass the same-value short-circuit in the public API
1686          */
1687         if (rte_eth_promiscuous_get(port_id) == 1 &&
1688             *dev->dev_ops->promiscuous_enable != NULL) {
1689                 ret = eth_err(port_id,
1690                               (*dev->dev_ops->promiscuous_enable)(dev));
1691                 if (ret != 0 && ret != -ENOTSUP) {
1692                         RTE_ETHDEV_LOG(ERR,
1693                                 "Failed to enable promiscuous mode for device (port %u): %s\n",
1694                                 port_id, rte_strerror(-ret));
1695                         return ret;
1696                 }
1697         } else if (rte_eth_promiscuous_get(port_id) == 0 &&
1698                    *dev->dev_ops->promiscuous_disable != NULL) {
1699                 ret = eth_err(port_id,
1700                               (*dev->dev_ops->promiscuous_disable)(dev));
1701                 if (ret != 0 && ret != -ENOTSUP) {
1702                         RTE_ETHDEV_LOG(ERR,
1703                                 "Failed to disable promiscuous mode for device (port %u): %s\n",
1704                                 port_id, rte_strerror(-ret));
1705                         return ret;
1706                 }
1707         }
1708
1709         /* replay all multicast configuration */
1710         /*
1711          * use callbacks directly since we don't need the port_id check and
1712          * want to bypass the same-value short-circuit in the public API
1713          */
1714         if (rte_eth_allmulticast_get(port_id) == 1 &&
1715             *dev->dev_ops->allmulticast_enable != NULL) {
1716                 ret = eth_err(port_id,
1717                               (*dev->dev_ops->allmulticast_enable)(dev));
1718                 if (ret != 0 && ret != -ENOTSUP) {
1719                         RTE_ETHDEV_LOG(ERR,
1720                                 "Failed to enable allmulticast mode for device (port %u): %s\n",
1721                                 port_id, rte_strerror(-ret));
1722                         return ret;
1723                 }
1724         } else if (rte_eth_allmulticast_get(port_id) == 0 &&
1725                    *dev->dev_ops->allmulticast_disable != NULL) {
1726                 ret = eth_err(port_id,
1727                               (*dev->dev_ops->allmulticast_disable)(dev));
1728                 if (ret != 0 && ret != -ENOTSUP) {
1729                         RTE_ETHDEV_LOG(ERR,
1730                                 "Failed to disable allmulticast mode for device (port %u): %s\n",
1731                                 port_id, rte_strerror(-ret));
1732                         return ret;
1733                 }
1734         }
1735
1736         return 0;
1737 }
1738
1739 int
1740 rte_eth_dev_start(uint16_t port_id)
1741 {
1742         struct rte_eth_dev *dev;
1743         struct rte_eth_dev_info dev_info;
1744         int diag;
1745         int ret, ret_stop;
1746
1747         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1748         dev = &rte_eth_devices[port_id];
1749
1750         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1751
1752         if (dev->data->dev_started != 0) {
1753                 RTE_ETHDEV_LOG(INFO,
1754                         "Device with port_id=%"PRIu16" already started\n",
1755                         port_id);
1756                 return 0;
1757         }
1758
1759         ret = rte_eth_dev_info_get(port_id, &dev_info);
1760         if (ret != 0)
1761                 return ret;
1762
1763         /* Let's restore the MAC now if the device does not support live change */
1764         if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
1765                 eth_dev_mac_restore(dev, &dev_info);
1766
1767         diag = (*dev->dev_ops->dev_start)(dev);
1768         if (diag == 0)
1769                 dev->data->dev_started = 1;
1770         else
1771                 return eth_err(port_id, diag);
1772
1773         ret = eth_dev_config_restore(dev, &dev_info, port_id);
1774         if (ret != 0) {
1775                 RTE_ETHDEV_LOG(ERR,
1776                         "Error during restoring configuration for device (port %u): %s\n",
1777                         port_id, rte_strerror(-ret));
1778                 ret_stop = rte_eth_dev_stop(port_id);
1779                 if (ret_stop != 0) {
1780                         RTE_ETHDEV_LOG(ERR,
1781                                 "Failed to stop device (port %u): %s\n",
1782                                 port_id, rte_strerror(-ret_stop));
1783                 }
1784
1785                 return ret;
1786         }
1787
1788         if (dev->data->dev_conf.intr_conf.lsc == 0) {
1789                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
1790                 (*dev->dev_ops->link_update)(dev, 0);
1791         }
1792
1793         rte_ethdev_trace_start(port_id);
1794         return 0;
1795 }
1796
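     /*
      * Usage sketch (illustrative only): starting a port once
      * rte_eth_dev_configure() and the queue setup calls have
      * succeeded. port_id is an assumed valid port.
      *
      *     int ret = rte_eth_dev_start(port_id);
      *
      *     if (ret < 0)
      *             rte_exit(EXIT_FAILURE, "cannot start port %u: %s\n",
      *                      port_id, rte_strerror(-ret));
      */
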
1797 int
1798 rte_eth_dev_stop(uint16_t port_id)
1799 {
1800         struct rte_eth_dev *dev;
1801         int ret;
1802
1803         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1804         dev = &rte_eth_devices[port_id];
1805
1806         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_stop, -ENOTSUP);
1807
1808         if (dev->data->dev_started == 0) {
1809                 RTE_ETHDEV_LOG(INFO,
1810                         "Device with port_id=%"PRIu16" already stopped\n",
1811                         port_id);
1812                 return 0;
1813         }
1814
1815         dev->data->dev_started = 0;
1816         ret = (*dev->dev_ops->dev_stop)(dev);
1817         rte_ethdev_trace_stop(port_id, ret);
1818
1819         return ret;
1820 }
1821
1822 int
1823 rte_eth_dev_set_link_up(uint16_t port_id)
1824 {
1825         struct rte_eth_dev *dev;
1826
1827         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1828         dev = &rte_eth_devices[port_id];
1829
1830         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1831         return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
1832 }
1833
1834 int
1835 rte_eth_dev_set_link_down(uint16_t port_id)
1836 {
1837         struct rte_eth_dev *dev;
1838
1839         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1840         dev = &rte_eth_devices[port_id];
1841
1842         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1843         return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
1844 }
1845
1846 int
1847 rte_eth_dev_close(uint16_t port_id)
1848 {
1849         struct rte_eth_dev *dev;
1850         int firsterr, binerr;
1851         int *lasterr = &firsterr;
1852
1853         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1854         dev = &rte_eth_devices[port_id];
1855
1856         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
1857         *lasterr = (*dev->dev_ops->dev_close)(dev);
1858         if (*lasterr != 0)
1859                 lasterr = &binerr;
1860
1861         rte_ethdev_trace_close(port_id);
1862         *lasterr = rte_eth_dev_release_port(dev);
1863
1864         return firsterr;
1865 }
1866
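     /*
      * The close path above uses an error-latching idiom: the first
      * non-zero return value is kept while the remaining cleanup step
      * still runs. Sketched standalone with hypothetical helpers
      * step_one() and step_two():
      *
      *     int firsterr = 0, binerr = 0;
      *     int *lasterr = &firsterr;
      *
      *     *lasterr = step_one();
      *     if (*lasterr != 0)
      *             lasterr = &binerr;
      *     *lasterr = step_two();
      *     return firsterr;
      */
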
1867 int
1868 rte_eth_dev_reset(uint16_t port_id)
1869 {
1870         struct rte_eth_dev *dev;
1871         int ret;
1872
1873         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1874         dev = &rte_eth_devices[port_id];
1875
1876         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
1877
1878         ret = rte_eth_dev_stop(port_id);
1879         if (ret != 0) {
1880                 RTE_ETHDEV_LOG(ERR,
1881                         "Failed to stop device (port %u) before reset: %s - ignoring\n",
1882                         port_id, rte_strerror(-ret));
1883         }
1884         ret = dev->dev_ops->dev_reset(dev);
1885
1886         return eth_err(port_id, ret);
1887 }
1888
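     /*
      * Usage sketch (illustrative only): after a successful reset the
      * application is expected to reconfigure from scratch before
      * restarting. nb_rxq, nb_txq and conf are assumed to be the
      * values used before the reset; queue setup and
      * rte_eth_dev_start() then follow as on first initialization.
      *
      *     if (rte_eth_dev_reset(port_id) == 0)
      *             ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq,
      *                                         &conf);
      */
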
1889 int
1890 rte_eth_dev_is_removed(uint16_t port_id)
1891 {
1892         struct rte_eth_dev *dev;
1893         int ret;
1894
1895         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
1896         dev = &rte_eth_devices[port_id];
1897
1898         if (dev->state == RTE_ETH_DEV_REMOVED)
1899                 return 1;
1900
1901         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);
1902
1903         ret = dev->dev_ops->is_removed(dev);
1904         if (ret != 0)
1905                 /* Device is physically removed. */
1906                 dev->state = RTE_ETH_DEV_REMOVED;
1907
1908         return ret;
1909 }
1910
1911 static int
1912 rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg,
1913                              uint16_t n_seg, uint32_t *mbp_buf_size,
1914                              const struct rte_eth_dev_info *dev_info)
1915 {
1916         const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa;
1917         struct rte_mempool *mp_first;
1918         uint32_t offset_mask;
1919         uint16_t seg_idx;
1920
1921         if (n_seg > seg_capa->max_nseg) {
1922                 RTE_ETHDEV_LOG(ERR,
1923                                "Requested Rx segments %u exceed supported %u\n",
1924                                n_seg, seg_capa->max_nseg);
1925                 return -EINVAL;
1926         }
1927         /*
1928          * Check the sizes and offsets against buffer sizes
1929          * for each segment specified in extended configuration.
1930          */
1931         mp_first = rx_seg[0].mp;
1932         offset_mask = (1u << seg_capa->offset_align_log2) - 1;
1933         for (seg_idx = 0; seg_idx < n_seg; seg_idx++) {
1934                 struct rte_mempool *mpl = rx_seg[seg_idx].mp;
1935                 uint32_t length = rx_seg[seg_idx].length;
1936                 uint32_t offset = rx_seg[seg_idx].offset;
1937
1938                 if (mpl == NULL) {
1939                         RTE_ETHDEV_LOG(ERR, "null mempool pointer\n");
1940                         return -EINVAL;
1941                 }
1942                 if (seg_idx != 0 && mp_first != mpl &&
1943                     seg_capa->multi_pools == 0) {
1944                         RTE_ETHDEV_LOG(ERR, "Receiving to multiple pools is not supported\n");
1945                         return -ENOTSUP;
1946                 }
1947                 if (offset != 0) {
1948                         if (seg_capa->offset_allowed == 0) {
1949                                 RTE_ETHDEV_LOG(ERR, "Rx segmentation with offset is not supported\n");
1950                                 return -ENOTSUP;
1951                         }
1952                         if (offset & offset_mask) {
1953                                 RTE_ETHDEV_LOG(ERR, "Rx segmentation invalid offset alignment %u, %u\n",
1954                                                offset,
1955                                                seg_capa->offset_align_log2);
1956                                 return -EINVAL;
1957                         }
1958                 }
1959                 if (mpl->private_data_size <
1960                         sizeof(struct rte_pktmbuf_pool_private)) {
1961                         RTE_ETHDEV_LOG(ERR,
1962                                        "%s private_data_size %u < %u\n",
1963                                        mpl->name, mpl->private_data_size,
1964                                        (unsigned int)sizeof
1965                                         (struct rte_pktmbuf_pool_private));
1966                         return -ENOSPC;
1967                 }
1968                 offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM;
1969                 *mbp_buf_size = rte_pktmbuf_data_room_size(mpl);
1970                 length = length != 0 ? length : *mbp_buf_size;
1971                 if (*mbp_buf_size < length + offset) {
1972                         RTE_ETHDEV_LOG(ERR,
1973                                        "%s mbuf_data_room_size %u < %u (segment length=%u + segment offset=%u)\n",
1974                                        mpl->name, *mbp_buf_size,
1975                                        length + offset, length, offset);
1976                         return -EINVAL;
1977                 }
1978         }
1979         return 0;
1980 }
1981
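     /*
      * Sketch of an Rx buffer-split configuration that the helper
      * above validates (illustrative only; hdr_pool and pay_pool are
      * assumed pre-created mempools, and a zero length means "rest of
      * the packet" up to the pool buffer size):
      *
      *     union rte_eth_rxseg seg[2];
      *     struct rte_eth_rxconf rxconf;
      *
      *     memset(seg, 0, sizeof(seg));
      *     memset(&rxconf, 0, sizeof(rxconf));
      *     seg[0].split.mp = hdr_pool;
      *     seg[0].split.length = 128;
      *     seg[1].split.mp = pay_pool;
      *     rxconf.offloads = RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
      *     rxconf.rx_seg = seg;
      *     rxconf.rx_nseg = 2;
      *     ret = rte_eth_rx_queue_setup(port_id, 0, 1024,
      *                                  rte_socket_id(), &rxconf, NULL);
      */
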
1982 int
1983 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1984                        uint16_t nb_rx_desc, unsigned int socket_id,
1985                        const struct rte_eth_rxconf *rx_conf,
1986                        struct rte_mempool *mp)
1987 {
1988         int ret;
1989         uint32_t mbp_buf_size;
1990         struct rte_eth_dev *dev;
1991         struct rte_eth_dev_info dev_info;
1992         struct rte_eth_rxconf local_conf;
1993         void **rxq;
1994
1995         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1996         dev = &rte_eth_devices[port_id];
1997
1998         if (rx_queue_id >= dev->data->nb_rx_queues) {
1999                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
2000                 return -EINVAL;
2001         }
2002
2003         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
2004
2005         ret = rte_eth_dev_info_get(port_id, &dev_info);
2006         if (ret != 0)
2007                 return ret;
2008
2009         if (mp != NULL) {
2010                 /* Single pool configuration check. */
2011                 if (rx_conf != NULL && rx_conf->rx_nseg != 0) {
2012                         RTE_ETHDEV_LOG(ERR,
2013                                        "Ambiguous segment configuration\n");
2014                         return -EINVAL;
2015                 }
2016                 /*
2017                  * Check the size of the mbuf data buffer; this value
2018                  * must be provided in the private data of the memory pool.
2019                  * First check that the memory pool has valid private data.
2020                  */
2021                 if (mp->private_data_size <
2022                                 sizeof(struct rte_pktmbuf_pool_private)) {
2023                         RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n",
2024                                 mp->name, mp->private_data_size,
2025                                 (unsigned int)
2026                                 sizeof(struct rte_pktmbuf_pool_private));
2027                         return -ENOSPC;
2028                 }
2029                 mbp_buf_size = rte_pktmbuf_data_room_size(mp);
2030                 if (mbp_buf_size < dev_info.min_rx_bufsize +
2031                                    RTE_PKTMBUF_HEADROOM) {
2032                         RTE_ETHDEV_LOG(ERR,
2033                                        "%s mbuf_data_room_size %u < %u (RTE_PKTMBUF_HEADROOM=%u + min_rx_bufsize(dev)=%u)\n",
2034                                        mp->name, mbp_buf_size,
2035                                        RTE_PKTMBUF_HEADROOM +
2036                                        dev_info.min_rx_bufsize,
2037                                        RTE_PKTMBUF_HEADROOM,
2038                                        dev_info.min_rx_bufsize);
2039                         return -EINVAL;
2040                 }
2041         } else {
2042                 const struct rte_eth_rxseg_split *rx_seg;
2043                 uint16_t n_seg;
2044
2045                 /* Extended multi-segment configuration check. */
2046                 if (rx_conf == NULL || rx_conf->rx_seg == NULL || rx_conf->rx_nseg == 0) {
2047                         RTE_ETHDEV_LOG(ERR,
2048                                        "Memory pool is null and no extended configuration provided\n");
2049                         return -EINVAL;
2050                 }
2051
2052                 rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg;
2053                 n_seg = rx_conf->rx_nseg;
2054
2055                 if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
2056                         ret = rte_eth_rx_queue_check_split(rx_seg, n_seg,
2057                                                            &mbp_buf_size,
2058                                                            &dev_info);
2059                         if (ret != 0)
2060                                 return ret;
2061                 } else {
2062                         RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n");
2063                         return -EINVAL;
2064                 }
2065         }
2066
2067         /* Use the default specified by the driver if nb_rx_desc is zero */
2068         if (nb_rx_desc == 0) {
2069                 nb_rx_desc = dev_info.default_rxportconf.ring_size;
2070                 /* If driver default is also zero, fall back on EAL default */
2071                 if (nb_rx_desc == 0)
2072                         nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
2073         }
2074
2075         if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
2076                         nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
2077                         nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
2079                 RTE_ETHDEV_LOG(ERR,
2080                         "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
2081                         nb_rx_desc, dev_info.rx_desc_lim.nb_max,
2082                         dev_info.rx_desc_lim.nb_min,
2083                         dev_info.rx_desc_lim.nb_align);
2084                 return -EINVAL;
2085         }
2086
2087         if (dev->data->dev_started &&
2088                 !(dev_info.dev_capa &
2089                         RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
2090                 return -EBUSY;
2091
2092         if (dev->data->dev_started &&
2093                 (dev->data->rx_queue_state[rx_queue_id] !=
2094                         RTE_ETH_QUEUE_STATE_STOPPED))
2095                 return -EBUSY;
2096
2097         rxq = dev->data->rx_queues;
2098         if (rxq[rx_queue_id]) {
2099                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
2100                                         -ENOTSUP);
2101                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
2102                 rxq[rx_queue_id] = NULL;
2103         }
2104
2105         if (rx_conf == NULL)
2106                 rx_conf = &dev_info.default_rxconf;
2107
2108         local_conf = *rx_conf;
2109
2110         /*
2111          * If an offload has already been enabled in
2112          * rte_eth_dev_configure(), it has been enabled on all queues,
2113          * so there is no need to enable it on this queue again.
2114          * The local_conf.offloads input to the underlying PMD only
2115          * carries those offloads which are enabled on this queue
2116          * but not on all queues.
2117          */
2118         local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
2119
2120         /*
2121          * Offloads newly added for this queue are those not enabled in
2122          * rte_eth_dev_configure() and they must be per-queue offloads.
2123          * A pure per-port offload can't be enabled on one queue while
2124          * disabled on another queue, and it can't be newly added on a
2125          * single queue if it hasn't been enabled in
2126          * rte_eth_dev_configure().
2127          */
2128         if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
2129              local_conf.offloads) {
2130                 RTE_ETHDEV_LOG(ERR,
2131                         "Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
2132                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2133                         port_id, rx_queue_id, local_conf.offloads,
2134                         dev_info.rx_queue_offload_capa,
2135                         __func__);
2136                 return -EINVAL;
2137         }
2138
2139         /*
2140          * If LRO is enabled, check that the maximum aggregated packet
2141          * size is supported by the configured device.
2142          */
2143         if (local_conf.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
2144                 if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0)
2145                         dev->data->dev_conf.rxmode.max_lro_pkt_size =
2146                                 dev->data->dev_conf.rxmode.max_rx_pkt_len;
2147                 int ret = eth_dev_check_lro_pkt_size(port_id,
2148                                 dev->data->dev_conf.rxmode.max_lro_pkt_size,
2149                                 dev->data->dev_conf.rxmode.max_rx_pkt_len,
2150                                 dev_info.max_lro_pkt_size);
2151                 if (ret != 0)
2152                         return ret;
2153         }
2154
2155         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
2156                                               socket_id, &local_conf, mp);
2157         if (!ret) {
2158                 if (!dev->data->min_rx_buf_size ||
2159                     dev->data->min_rx_buf_size > mbp_buf_size)
2160                         dev->data->min_rx_buf_size = mbp_buf_size;
2161         }
2162
2163         rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp,
2164                 rx_conf, ret);
2165         return eth_err(port_id, ret);
2166 }
2167
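     /*
      * Usage sketch (illustrative only): the common single-mempool
      * case. Passing 0 descriptors and a NULL rte_eth_rxconf selects
      * the driver defaults, as handled above.
      *
      *     struct rte_mempool *mp = rte_pktmbuf_pool_create("rx_pool",
      *             8192, 256, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
      *             rte_socket_id());
      *
      *     if (mp != NULL)
      *             ret = rte_eth_rx_queue_setup(port_id, 0, 0,
      *                             rte_socket_id(), NULL, mp);
      */
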
2168 int
2169 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2170                                uint16_t nb_rx_desc,
2171                                const struct rte_eth_hairpin_conf *conf)
2172 {
2173         int ret;
2174         struct rte_eth_dev *dev;
2175         struct rte_eth_hairpin_cap cap;
2176         void **rxq;
2177         int i;
2178         int count;
2179
2180         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2181         dev = &rte_eth_devices[port_id];
2182
2183         if (rx_queue_id >= dev->data->nb_rx_queues) {
2184                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
2185                 return -EINVAL;
2186         }
2187
2188         if (conf == NULL) {
2189                 RTE_ETHDEV_LOG(ERR,
2190                         "Cannot setup ethdev port %u Rx hairpin queue from NULL config\n",
2191                         port_id);
2192                 return -EINVAL;
2193         }
2194
2195         ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2196         if (ret != 0)
2197                 return ret;
2198         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_hairpin_queue_setup,
2199                                 -ENOTSUP);
2200         /* if nb_rx_desc is zero use max number of desc from the driver. */
2201         if (nb_rx_desc == 0)
2202                 nb_rx_desc = cap.max_nb_desc;
2203         if (nb_rx_desc > cap.max_nb_desc) {
2204                 RTE_ETHDEV_LOG(ERR,
2205                         "Invalid value for nb_rx_desc(=%hu), should be: <= %hu\n",
2206                         nb_rx_desc, cap.max_nb_desc);
2207                 return -EINVAL;
2208         }
2209         if (conf->peer_count > cap.max_rx_2_tx) {
2210                 RTE_ETHDEV_LOG(ERR,
2211                         "Invalid value for number of peers for Rx queue(=%u), should be: <= %hu\n",
2212                         conf->peer_count, cap.max_rx_2_tx);
2213                 return -EINVAL;
2214         }
2215         if (conf->peer_count == 0) {
2216                 RTE_ETHDEV_LOG(ERR,
2217                         "Invalid value for number of peers for Rx queue(=%u), should be: > 0\n",
2218                         conf->peer_count);
2219                 return -EINVAL;
2220         }
2221         for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
2222              cap.max_nb_queues != UINT16_MAX; i++) {
2223                 if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
2224                         count++;
2225         }
2226         if (count > cap.max_nb_queues) {
2227                 RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues, max is %d\n",
2228                                cap.max_nb_queues);
2229                 return -EINVAL;
2230         }
2231         if (dev->data->dev_started)
2232                 return -EBUSY;
2233         rxq = dev->data->rx_queues;
2234         if (rxq[rx_queue_id] != NULL) {
2235                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
2236                                         -ENOTSUP);
2237                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
2238                 rxq[rx_queue_id] = NULL;
2239         }
2240         ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
2241                                                       nb_rx_desc, conf);
2242         if (ret == 0)
2243                 dev->data->rx_queue_state[rx_queue_id] =
2244                         RTE_ETH_QUEUE_STATE_HAIRPIN;
2245         return eth_err(port_id, ret);
2246 }
2247
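     /*
      * Usage sketch (illustrative only): a single-peer hairpin Rx
      * queue peered with Tx hairpin queue peer_txq on the same port.
      * rxq_id and peer_txq are assumed queue indexes; an nb_rx_desc
      * of 0 picks the driver maximum, as handled above.
      *
      *     struct rte_eth_hairpin_conf hconf = { .peer_count = 1 };
      *
      *     hconf.peers[0].port = port_id;
      *     hconf.peers[0].queue = peer_txq;
      *     ret = rte_eth_rx_hairpin_queue_setup(port_id, rxq_id, 0,
      *                                          &hconf);
      */
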
2248 int
2249 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2250                        uint16_t nb_tx_desc, unsigned int socket_id,
2251                        const struct rte_eth_txconf *tx_conf)
2252 {
2253         struct rte_eth_dev *dev;
2254         struct rte_eth_dev_info dev_info;
2255         struct rte_eth_txconf local_conf;
2256         void **txq;
2257         int ret;
2258
2259         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2260         dev = &rte_eth_devices[port_id];
2261
2262         if (tx_queue_id >= dev->data->nb_tx_queues) {
2263                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
2264                 return -EINVAL;
2265         }
2266
2267         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
2268
2269         ret = rte_eth_dev_info_get(port_id, &dev_info);
2270         if (ret != 0)
2271                 return ret;
2272
2273         /* Use the default specified by the driver if nb_tx_desc is zero */
2274         if (nb_tx_desc == 0) {
2275                 nb_tx_desc = dev_info.default_txportconf.ring_size;
2276                 /* If driver default is zero, fall back on EAL default */
2277                 if (nb_tx_desc == 0)
2278                         nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
2279         }
2280         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
2281             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
2282             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
2283                 RTE_ETHDEV_LOG(ERR,
2284                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
2285                         nb_tx_desc, dev_info.tx_desc_lim.nb_max,
2286                         dev_info.tx_desc_lim.nb_min,
2287                         dev_info.tx_desc_lim.nb_align);
2288                 return -EINVAL;
2289         }
2290
2291         if (dev->data->dev_started &&
2292                 !(dev_info.dev_capa &
2293                         RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
2294                 return -EBUSY;
2295
2296         if (dev->data->dev_started &&
2297                 (dev->data->tx_queue_state[tx_queue_id] !=
2298                         RTE_ETH_QUEUE_STATE_STOPPED))
2299                 return -EBUSY;
2300
2301         txq = dev->data->tx_queues;
2302         if (txq[tx_queue_id]) {
2303                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
2304                                         -ENOTSUP);
2305                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
2306                 txq[tx_queue_id] = NULL;
2307         }
2308
2309         if (tx_conf == NULL)
2310                 tx_conf = &dev_info.default_txconf;
2311
2312         local_conf = *tx_conf;
2313
2314         /*
2315          * If an offload has already been enabled in
2316          * rte_eth_dev_configure(), it has been enabled on all queues,
2317          * so there is no need to enable it on this queue again.
2318          * The local_conf.offloads input to the underlying PMD only
2319          * carries those offloads which are enabled on this queue
2320          * but not on all queues.
2321          */
2322         local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
2323
2324         /*
2325          * Offloads newly added for this queue are those not enabled in
2326          * rte_eth_dev_configure() and they must be per-queue offloads.
2327          * A pure per-port offload can't be enabled on one queue while
2328          * disabled on another queue, and it can't be newly added on a
2329          * single queue if it hasn't been enabled in
2330          * rte_eth_dev_configure().
2331          */
2332         if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
2333              local_conf.offloads) {
2334                 RTE_ETHDEV_LOG(ERR,
2335                         "Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
2336                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2337                         port_id, tx_queue_id, local_conf.offloads,
2338                         dev_info.tx_queue_offload_capa,
2339                         __func__);
2340                 return -EINVAL;
2341         }
2342
2343         rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf);
2344         return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
2345                        tx_queue_id, nb_tx_desc, socket_id, &local_conf));
2346 }
2347
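     /*
      * Usage sketch (illustrative only): requesting a per-queue Tx
      * offload on top of the port-level configuration, guarded by the
      * per-queue capability checked above. dev_info is assumed to be
      * filled by rte_eth_dev_info_get().
      *
      *     struct rte_eth_txconf txconf = dev_info.default_txconf;
      *
      *     txconf.offloads = 0;
      *     if (dev_info.tx_queue_offload_capa &
      *         DEV_TX_OFFLOAD_MBUF_FAST_FREE)
      *             txconf.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
      *     ret = rte_eth_tx_queue_setup(port_id, 0, 0, rte_socket_id(),
      *                                  &txconf);
      */
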
2348 int
2349 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2350                                uint16_t nb_tx_desc,
2351                                const struct rte_eth_hairpin_conf *conf)
2352 {
2353         struct rte_eth_dev *dev;
2354         struct rte_eth_hairpin_cap cap;
2355         void **txq;
2356         int i;
2357         int count;
2358         int ret;
2359
2360         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2361         dev = &rte_eth_devices[port_id];
2362
2363         if (tx_queue_id >= dev->data->nb_tx_queues) {
2364                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
2365                 return -EINVAL;
2366         }
2367
2368         if (conf == NULL) {
2369                 RTE_ETHDEV_LOG(ERR,
2370                         "Cannot setup ethdev port %u Tx hairpin queue from NULL config\n",
2371                         port_id);
2372                 return -EINVAL;
2373         }
2374
2375         ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2376         if (ret != 0)
2377                 return ret;
2378         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup,
2379                                 -ENOTSUP);
2380         /* if nb_tx_desc is zero use max number of desc from the driver. */
2381         if (nb_tx_desc == 0)
2382                 nb_tx_desc = cap.max_nb_desc;
2383         if (nb_tx_desc > cap.max_nb_desc) {
2384                 RTE_ETHDEV_LOG(ERR,
2385                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu\n",
2386                         nb_tx_desc, cap.max_nb_desc);
2387                 return -EINVAL;
2388         }
2389         if (conf->peer_count > cap.max_tx_2_rx) {
2390                 RTE_ETHDEV_LOG(ERR,
2391                         "Invalid value for number of peers for Tx queue(=%u), should be: <= %hu\n",
2392                         conf->peer_count, cap.max_tx_2_rx);
2393                 return -EINVAL;
2394         }
2395         if (conf->peer_count == 0) {
2396                 RTE_ETHDEV_LOG(ERR,
2397                         "Invalid value for number of peers for Tx queue(=%u), should be: > 0\n",
2398                         conf->peer_count);
2399                 return -EINVAL;
2400         }
2401         for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
2402              cap.max_nb_queues != UINT16_MAX; i++) {
2403                 if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
2404                         count++;
2405         }
2406         if (count > cap.max_nb_queues) {
2407                 RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues, max is %d\n",
2408                                cap.max_nb_queues);
2409                 return -EINVAL;
2410         }
2411         if (dev->data->dev_started)
2412                 return -EBUSY;
2413         txq = dev->data->tx_queues;
2414         if (txq[tx_queue_id] != NULL) {
2415                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
2416                                         -ENOTSUP);
2417                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
2418                 txq[tx_queue_id] = NULL;
2419         }
2420         ret = (*dev->dev_ops->tx_hairpin_queue_setup)
2421                 (dev, tx_queue_id, nb_tx_desc, conf);
2422         if (ret == 0)
2423                 dev->data->tx_queue_state[tx_queue_id] =
2424                         RTE_ETH_QUEUE_STATE_HAIRPIN;
2425         return eth_err(port_id, ret);
2426 }
2427
2428 int
2429 rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
2430 {
2431         struct rte_eth_dev *dev;
2432         int ret;
2433
2434         RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2435         dev = &rte_eth_devices[tx_port];
2436
2437         if (dev->data->dev_started == 0) {
2438                 RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port);
2439                 return -EBUSY;
2440         }
2441
2442         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_bind, -ENOTSUP);
2443         ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port);
2444         if (ret != 0)
2445                 RTE_ETHDEV_LOG(ERR, "Failed to bind hairpin Tx %d"
2446                                " to Rx %d (%d - all ports)\n",
2447                                tx_port, rx_port, RTE_MAX_ETHPORTS);
2448
2449         return ret;
2450 }
2451
2452 int
2453 rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
2454 {
2455         struct rte_eth_dev *dev;
2456         int ret;
2457
2458         RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2459         dev = &rte_eth_devices[tx_port];
2460
2461         if (dev->data->dev_started == 0) {
2462                 RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port);
2463                 return -EBUSY;
2464         }
2465
2466         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_unbind, -ENOTSUP);
2467         ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port);
2468         if (ret != 0)
2469                 RTE_ETHDEV_LOG(ERR, "Failed to unbind hairpin Tx %d"
2470                                " from Rx %d (%d - all ports)\n",
2471                                tx_port, rx_port, RTE_MAX_ETHPORTS);
2472
2473         return ret;
2474 }
2475
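     /*
      * Usage sketch (illustrative only): when the hairpin queues were
      * created with manual_bind set in their configuration, the
      * application binds explicitly once both ports are started, and
      * can later unbind from all peer ports at once by passing
      * RTE_MAX_ETHPORTS, as the log messages above note.
      *
      *     ret = rte_eth_hairpin_bind(tx_port, rx_port);
      *     ... traffic runs ...
      *     ret = rte_eth_hairpin_unbind(tx_port, RTE_MAX_ETHPORTS);
      */
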
2476 int
2477 rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
2478                                size_t len, uint32_t direction)
2479 {
2480         struct rte_eth_dev *dev;
2481         int ret;
2482
2483         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2484         dev = &rte_eth_devices[port_id];
2485
2486         if (peer_ports == NULL) {
2487                 RTE_ETHDEV_LOG(ERR,
2488                         "Cannot get ethdev port %u hairpin peer ports to NULL\n",
2489                         port_id);
2490                 return -EINVAL;
2491         }
2492
2493         if (len == 0) {
2494                 RTE_ETHDEV_LOG(ERR,
2495                         "Cannot get ethdev port %u hairpin peer ports to array with zero size\n",
2496                         port_id);
2497                 return -EINVAL;
2498         }
2499
2500         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_get_peer_ports,
2501                                 -ENOTSUP);
2502
2503         ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports,
2504                                                       len, direction);
2505         if (ret < 0)
2506                 RTE_ETHDEV_LOG(ERR, "Failed to get port %d hairpin peer %s ports\n",
2507                                port_id, direction ? "Rx" : "Tx");
2508
2509         return ret;
2510 }
2511
2512 void
2513 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
2514                 void *userdata __rte_unused)
2515 {
2516         rte_pktmbuf_free_bulk(pkts, unsent);
2517 }
2518
2519 void
2520 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
2521                 void *userdata)
2522 {
2523         uint64_t *count = userdata;
2524
2525         rte_pktmbuf_free_bulk(pkts, unsent);
2526         *count += unsent;
2527 }
2528
2529 int
2530 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
2531                 buffer_tx_error_fn cbfn, void *userdata)
2532 {
2533         if (buffer == NULL) {
2534                 RTE_ETHDEV_LOG(ERR,
2535                         "Cannot set Tx buffer error callback to NULL buffer\n");
2536                 return -EINVAL;
2537         }
2538
2539         buffer->error_callback = cbfn;
2540         buffer->error_userdata = userdata;
2541         return 0;
2542 }
2543
2544 int
2545 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
2546 {
2547         int ret = 0;
2548
2549         if (buffer == NULL) {
2550                 RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL buffer\n");
2551                 return -EINVAL;
2552         }
2553
2554         buffer->size = size;
2555         if (buffer->error_callback == NULL) {
2556                 ret = rte_eth_tx_buffer_set_err_callback(
2557                         buffer, rte_eth_tx_buffer_drop_callback, NULL);
2558         }
2559
2560         return ret;
2561 }
2562
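     /*
      * Usage sketch (illustrative only): a 32-packet Tx buffer whose
      * drops are counted instead of being silently freed. mbuf is an
      * assumed packet to transmit; error checks are omitted.
      *
      *     uint64_t drops = 0;
      *     struct rte_eth_dev_tx_buffer *buf = rte_zmalloc_socket(
      *             "tx_buf", RTE_ETH_TX_BUFFER_SIZE(32), 0,
      *             rte_socket_id());
      *
      *     rte_eth_tx_buffer_init(buf, 32);
      *     rte_eth_tx_buffer_set_err_callback(buf,
      *             rte_eth_tx_buffer_count_callback, &drops);
      *     rte_eth_tx_buffer(port_id, 0, buf, mbuf);
      *     rte_eth_tx_buffer_flush(port_id, 0, buf);
      */
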
2563 int
2564 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
2565 {
2566         struct rte_eth_dev *dev;
2567         int ret;
2568
2569         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2570         dev = &rte_eth_devices[port_id];
2571
2572         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
2573
2574         /* Call driver to free pending mbufs. */
2575         ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
2576                                                free_cnt);
2577         return eth_err(port_id, ret);
2578 }
2579
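     /*
      * Usage sketch (illustrative only): reclaim up to 64 already
      * transmitted mbufs from Tx queue 0; a free_cnt of 0 asks the
      * driver to free as many as possible.
      *
      *     ret = rte_eth_tx_done_cleanup(port_id, 0, 64);
      */
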
2580 int
2581 rte_eth_promiscuous_enable(uint16_t port_id)
2582 {
2583         struct rte_eth_dev *dev;
2584         int diag = 0;
2585
2586         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2587         dev = &rte_eth_devices[port_id];
2588
2589         if (dev->data->promiscuous == 1)
2590                 return 0;
2591
2592         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP);
2593
2594         diag = (*dev->dev_ops->promiscuous_enable)(dev);
2595         dev->data->promiscuous = (diag == 0) ? 1 : 0;
2596
2597         return eth_err(port_id, diag);
2598 }
2599
2600 int
2601 rte_eth_promiscuous_disable(uint16_t port_id)
2602 {
2603         struct rte_eth_dev *dev;
2604         int diag = 0;
2605
2606         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2607         dev = &rte_eth_devices[port_id];
2608
2609         if (dev->data->promiscuous == 0)
2610                 return 0;
2611
2612         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP);
2613
2614         dev->data->promiscuous = 0;
2615         diag = (*dev->dev_ops->promiscuous_disable)(dev);
2616         if (diag != 0)
2617                 dev->data->promiscuous = 1;
2618
2619         return eth_err(port_id, diag);
2620 }
2621
2622 int
2623 rte_eth_promiscuous_get(uint16_t port_id)
2624 {
2625         struct rte_eth_dev *dev;
2626
2627         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2628         dev = &rte_eth_devices[port_id];
2629
2630         return dev->data->promiscuous;
2631 }
2632
2633 int
2634 rte_eth_allmulticast_enable(uint16_t port_id)
2635 {
2636         struct rte_eth_dev *dev;
2637         int diag;
2638
2639         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2640         dev = &rte_eth_devices[port_id];
2641
2642         if (dev->data->all_multicast == 1)
2643                 return 0;
2644
2645         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP);
2646         diag = (*dev->dev_ops->allmulticast_enable)(dev);
2647         dev->data->all_multicast = (diag == 0) ? 1 : 0;
2648
2649         return eth_err(port_id, diag);
2650 }
2651
2652 int
2653 rte_eth_allmulticast_disable(uint16_t port_id)
2654 {
2655         struct rte_eth_dev *dev;
2656         int diag;
2657
2658         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2659         dev = &rte_eth_devices[port_id];
2660
2661         if (dev->data->all_multicast == 0)
2662                 return 0;
2663
2664         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP);
2665         dev->data->all_multicast = 0;
2666         diag = (*dev->dev_ops->allmulticast_disable)(dev);
2667         if (diag != 0)
2668                 dev->data->all_multicast = 1;
2669
2670         return eth_err(port_id, diag);
2671 }
2672
2673 int
2674 rte_eth_allmulticast_get(uint16_t port_id)
2675 {
2676         struct rte_eth_dev *dev;
2677
2678         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2679         dev = &rte_eth_devices[port_id];
2680
2681         return dev->data->all_multicast;
2682 }
2683
2684 int
2685 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
2686 {
2687         struct rte_eth_dev *dev;
2688
2689         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2690         dev = &rte_eth_devices[port_id];
2691
2692         if (eth_link == NULL) {
2693                 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n",
2694                         port_id);
2695                 return -EINVAL;
2696         }
2697
2698         if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started)
2699                 rte_eth_linkstatus_get(dev, eth_link);
2700         else {
2701                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2702                 (*dev->dev_ops->link_update)(dev, 1);
2703                 *eth_link = dev->data->dev_link;
2704         }
2705
2706         return 0;
2707 }
2708
2709 int
2710 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
2711 {
2712         struct rte_eth_dev *dev;
2713
2714         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2715         dev = &rte_eth_devices[port_id];
2716
2717         if (eth_link == NULL) {
2718                 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n",
2719                         port_id);
2720                 return -EINVAL;
2721         }
2722
2723         if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started)
2724                 rte_eth_linkstatus_get(dev, eth_link);
2725         else {
2726                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2727                 (*dev->dev_ops->link_update)(dev, 0);
2728                 *eth_link = dev->data->dev_link;
2729         }
2730
2731         return 0;
2732 }
2733
2734 const char *
2735 rte_eth_link_speed_to_str(uint32_t link_speed)
2736 {
2737         switch (link_speed) {
2738         case ETH_SPEED_NUM_NONE: return "None";
2739         case ETH_SPEED_NUM_10M:  return "10 Mbps";
2740         case ETH_SPEED_NUM_100M: return "100 Mbps";
2741         case ETH_SPEED_NUM_1G:   return "1 Gbps";
2742         case ETH_SPEED_NUM_2_5G: return "2.5 Gbps";
2743         case ETH_SPEED_NUM_5G:   return "5 Gbps";
2744         case ETH_SPEED_NUM_10G:  return "10 Gbps";
2745         case ETH_SPEED_NUM_20G:  return "20 Gbps";
2746         case ETH_SPEED_NUM_25G:  return "25 Gbps";
2747         case ETH_SPEED_NUM_40G:  return "40 Gbps";
2748         case ETH_SPEED_NUM_50G:  return "50 Gbps";
2749         case ETH_SPEED_NUM_56G:  return "56 Gbps";
2750         case ETH_SPEED_NUM_100G: return "100 Gbps";
2751         case ETH_SPEED_NUM_200G: return "200 Gbps";
2752         case ETH_SPEED_NUM_UNKNOWN: return "Unknown";
2753         default: return "Invalid";
2754         }
2755 }
2756
2757 int
2758 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
2759 {
2760         if (str == NULL) {
2761                 RTE_ETHDEV_LOG(ERR, "Cannot convert link to NULL string\n");
2762                 return -EINVAL;
2763         }
2764
2765         if (len == 0) {
2766                 RTE_ETHDEV_LOG(ERR,
2767                         "Cannot convert link to string with zero size\n");
2768                 return -EINVAL;
2769         }
2770
2771         if (eth_link == NULL) {
2772                 RTE_ETHDEV_LOG(ERR, "Cannot convert to string from NULL link\n");
2773                 return -EINVAL;
2774         }
2775
2776         if (eth_link->link_status == ETH_LINK_DOWN)
2777                 return snprintf(str, len, "Link down");
2778         else
2779                 return snprintf(str, len, "Link up at %s %s %s",
2780                         rte_eth_link_speed_to_str(eth_link->link_speed),
2781                         (eth_link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
2782                         "FDX" : "HDX",
2783                         (eth_link->link_autoneg == ETH_LINK_AUTONEG) ?
2784                         "Autoneg" : "Fixed");
2785 }
2786
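     /*
      * Usage sketch (illustrative only): query the link state without
      * waiting and print it with the helpers above.
      *
      *     struct rte_eth_link link;
      *     char text[RTE_ETH_LINK_MAX_STR_LEN];
      *
      *     if (rte_eth_link_get_nowait(port_id, &link) == 0) {
      *             rte_eth_link_to_str(text, sizeof(text), &link);
      *             printf("port %u: %s\n", port_id, text);
      *     }
      */
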
2787 int
2788 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
2789 {
2790         struct rte_eth_dev *dev;
2791
2792         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2793         dev = &rte_eth_devices[port_id];
2794
2795         if (stats == NULL) {
2796                 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u stats to NULL\n",
2797                         port_id);
2798                 return -EINVAL;
2799         }
2800
2801         memset(stats, 0, sizeof(*stats));
2802
2803         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
2804         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
2805         return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
2806 }
2807
2808 int
2809 rte_eth_stats_reset(uint16_t port_id)
2810 {
2811         struct rte_eth_dev *dev;
2812         int ret;
2813
2814         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2815         dev = &rte_eth_devices[port_id];
2816
2817         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
2818         ret = (*dev->dev_ops->stats_reset)(dev);
2819         if (ret != 0)
2820                 return eth_err(port_id, ret);
2821
2822         dev->data->rx_mbuf_alloc_failed = 0;
2823
2824         return 0;
2825 }
2826
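     /*
      * Usage sketch (illustrative only): read the basic counters,
      * then start a fresh measurement window.
      *
      *     struct rte_eth_stats st;
      *
      *     if (rte_eth_stats_get(port_id, &st) == 0)
      *             printf("rx=%"PRIu64" tx=%"PRIu64" missed=%"PRIu64"\n",
      *                    st.ipackets, st.opackets, st.imissed);
      *     rte_eth_stats_reset(port_id);
      */
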
2827 static inline int
2828 eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev)
2829 {
2830         uint16_t nb_rxqs, nb_txqs;
2831         int count;
2832
2833         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2834         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2835
2836         count = RTE_NB_STATS;
2837         if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) {
2838                 count += nb_rxqs * RTE_NB_RXQ_STATS;
2839                 count += nb_txqs * RTE_NB_TXQ_STATS;
2840         }
2841
2842         return count;
2843 }
2844
2845 static int
2846 eth_dev_get_xstats_count(uint16_t port_id)
2847 {
2848         struct rte_eth_dev *dev;
2849         int count;
2850
2851         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2852         dev = &rte_eth_devices[port_id];
2853         if (dev->dev_ops->xstats_get_names_by_id != NULL) {
2854                 count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
2855                                 NULL, 0);
2856                 if (count < 0)
2857                         return eth_err(port_id, count);
2858         }
2859         if (dev->dev_ops->xstats_get_names != NULL) {
2860                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
2861                 if (count < 0)
2862                         return eth_err(port_id, count);
2863         } else
2864                 count = 0;
2865
2867         count += eth_dev_get_xstats_basic_count(dev);
2868
2869         return count;
2870 }
2871
2872 int
2873 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
2874                 uint64_t *id)
2875 {
2876         int cnt_xstats, idx_xstat;
2877
2878         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2879
2880         if (xstat_name == NULL) {
2881                 RTE_ETHDEV_LOG(ERR,
2882                         "Cannot get ethdev port %u xstats ID from NULL xstat name\n",
2883                         port_id);
2884                 return -ENOMEM;
2885         }
2886
2887         if (id == NULL) {
2888                 RTE_ETHDEV_LOG(ERR,
2889                         "Cannot get ethdev port %u xstats ID to NULL\n",
2890                         port_id);
2891                 return -ENOMEM;
2892         }
2893
2894         /* Get count */
2895         cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
2896         if (cnt_xstats < 0) {
2897                 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n");
2898                 return -ENODEV;
2899         }
2900
2901         /* Get id-name lookup table */
2902         struct rte_eth_xstat_name xstats_names[cnt_xstats];
2903
2904         if (cnt_xstats != rte_eth_xstats_get_names_by_id(
2905                         port_id, xstats_names, cnt_xstats, NULL)) {
2906                 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n");
2907                 return -1;
2908         }
2909
2910         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
2911                 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
2912                         *id = idx_xstat;
2913                         return 0;
2914                 }
2915         }
2916
2917         return -EINVAL;
2918 }
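
/*
 * Editor's illustrative sketch, not part of the library: resolve a single
 * extended statistic by name, then read just that counter through the
 * by-id interface defined further below.
 */
static __rte_unused int
example_read_one_xstat(uint16_t port_id, const char *name, uint64_t *value)
{
	uint64_t id;
	int ret;

	ret = rte_eth_xstats_get_id_by_name(port_id, name, &id);
	if (ret != 0)
		return ret;

	/* On success the by-id getter returns the number of values filled. */
	ret = rte_eth_xstats_get_by_id(port_id, &id, value, 1);
	return ret < 0 ? ret : 0;
}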
2919
2920 /* retrieve basic stats names */
2921 static int
2922 eth_basic_stats_get_names(struct rte_eth_dev *dev,
2923         struct rte_eth_xstat_name *xstats_names)
2924 {
2925         int cnt_used_entries = 0;
2926         uint32_t idx, id_queue;
2927         uint16_t num_q;
2928
2929         for (idx = 0; idx < RTE_NB_STATS; idx++) {
2930                 strlcpy(xstats_names[cnt_used_entries].name,
2931                         eth_dev_stats_strings[idx].name,
2932                         sizeof(xstats_names[0].name));
2933                 cnt_used_entries++;
2934         }
2935
2936         if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
2937                 return cnt_used_entries;
2938
2939         num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2940         for (id_queue = 0; id_queue < num_q; id_queue++) {
2941                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
2942                         snprintf(xstats_names[cnt_used_entries].name,
2943                                 sizeof(xstats_names[0].name),
2944                                 "rx_q%u_%s",
2945                                 id_queue, eth_dev_rxq_stats_strings[idx].name);
2946                         cnt_used_entries++;
2947                 }
2948
2949         }
2950         num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2951         for (id_queue = 0; id_queue < num_q; id_queue++) {
2952                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
2953                         snprintf(xstats_names[cnt_used_entries].name,
2954                                 sizeof(xstats_names[0].name),
2955                                 "tx_q%u_%s",
2956                                 id_queue, eth_dev_txq_stats_strings[idx].name);
2957                         cnt_used_entries++;
2958                 }
2959         }
2960         return cnt_used_entries;
2961 }
2962
2963 /* retrieve ethdev extended statistics names */
2964 int
2965 rte_eth_xstats_get_names_by_id(uint16_t port_id,
2966         struct rte_eth_xstat_name *xstats_names, unsigned int size,
2967         uint64_t *ids)
2968 {
2969         struct rte_eth_xstat_name *xstats_names_copy;
2970         unsigned int no_basic_stat_requested = 1;
2971         unsigned int no_ext_stat_requested = 1;
2972         unsigned int expected_entries;
2973         unsigned int basic_count;
2974         struct rte_eth_dev *dev;
2975         unsigned int i;
2976         int ret;
2977
2978         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2979         dev = &rte_eth_devices[port_id];
2980
2981         basic_count = eth_dev_get_xstats_basic_count(dev);
2982         ret = eth_dev_get_xstats_count(port_id);
2983         if (ret < 0)
2984                 return ret;
2985         expected_entries = (unsigned int)ret;
2986
2987         /* Return max number of stats if no ids given */
2988         if (!ids) {
2989                 if (!xstats_names)
2990                         return expected_entries;
2991                 else if (size < expected_entries)
2992                         return expected_entries;
2993         }
2994
2995         if (ids && !xstats_names)
2996                 return -EINVAL;
2997
2998         if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
2999                 uint64_t ids_copy[size];
3000
3001                 for (i = 0; i < size; i++) {
3002                         if (ids[i] < basic_count) {
3003                                 no_basic_stat_requested = 0;
3004                                 break;
3005                         }
3006
3007                         /*
3008                          * Convert ids to xstats ids that PMD knows.
3009                          * ids known by user are basic + extended stats.
3010                          */
3011                         ids_copy[i] = ids[i] - basic_count;
3012                 }
3013
3014                 if (no_basic_stat_requested)
3015                         return (*dev->dev_ops->xstats_get_names_by_id)(dev,
3016                                         xstats_names, ids_copy, size);
3017         }
3018
3019         /* Retrieve all stats */
3020         if (!ids) {
3021                 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
3022                                 expected_entries);
3023                 if (num_stats < 0 || num_stats > (int)expected_entries)
3024                         return num_stats;
3025                 else
3026                         return expected_entries;
3027         }
3028
3029         xstats_names_copy = calloc(expected_entries,
3030                 sizeof(struct rte_eth_xstat_name));
3031
3032         if (!xstats_names_copy) {
3033                 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n");
3034                 return -ENOMEM;
3035         }
3036
3037         if (ids) {
3038                 for (i = 0; i < size; i++) {
3039                         if (ids[i] >= basic_count) {
3040                                 no_ext_stat_requested = 0;
3041                                 break;
3042                         }
3043                 }
3044         }
3045
3046         /* Fill xstats_names_copy structure */
3047         if (ids && no_ext_stat_requested) {
3048                 eth_basic_stats_get_names(dev, xstats_names_copy);
3049         } else {
3050                 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
3051                         expected_entries);
3052                 if (ret < 0) {
3053                         free(xstats_names_copy);
3054                         return ret;
3055                 }
3056         }
3057
3058         /* Filter stats */
3059         for (i = 0; i < size; i++) {
3060                 if (ids[i] >= expected_entries) {
3061                         RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
3062                         free(xstats_names_copy);
3063                         return -1;
3064                 }
3065                 xstats_names[i] = xstats_names_copy[ids[i]];
3066         }
3067
3068         free(xstats_names_copy);
3069         return size;
3070 }
3071
3072 int
3073 rte_eth_xstats_get_names(uint16_t port_id,
3074         struct rte_eth_xstat_name *xstats_names,
3075         unsigned int size)
3076 {
3077         struct rte_eth_dev *dev;
3078         int cnt_used_entries;
3079         int cnt_expected_entries;
3080         int cnt_driver_entries;
3081
3082         cnt_expected_entries = eth_dev_get_xstats_count(port_id);
3083         if (xstats_names == NULL || cnt_expected_entries < 0 ||
3084                         (int)size < cnt_expected_entries)
3085                 return cnt_expected_entries;
3086
3087         /* port_id checked in eth_dev_get_xstats_count() */
3088         dev = &rte_eth_devices[port_id];
3089
3090         cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names);
3091
3092         if (dev->dev_ops->xstats_get_names != NULL) {
3093                 /* If there are any driver-specific xstats, append them
3094                  * to end of list.
3095                  */
3096                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
3097                         dev,
3098                         xstats_names + cnt_used_entries,
3099                         size - cnt_used_entries);
3100                 if (cnt_driver_entries < 0)
3101                         return eth_err(port_id, cnt_driver_entries);
3102                 cnt_used_entries += cnt_driver_entries;
3103         }
3104
3105         return cnt_used_entries;
3106 }
3107
3109 static int
3110 eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
3111 {
3112         struct rte_eth_dev *dev;
3113         struct rte_eth_stats eth_stats;
3114         unsigned int count = 0, i, q;
3115         uint64_t val, *stats_ptr;
3116         uint16_t nb_rxqs, nb_txqs;
3117         int ret;
3118
3119         ret = rte_eth_stats_get(port_id, &eth_stats);
3120         if (ret < 0)
3121                 return ret;
3122
3123         dev = &rte_eth_devices[port_id];
3124
3125         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3126         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3127
3128         /* global stats */
3129         for (i = 0; i < RTE_NB_STATS; i++) {
3130                 stats_ptr = RTE_PTR_ADD(&eth_stats,
3131                                         eth_dev_stats_strings[i].offset);
3132                 val = *stats_ptr;
3133                 xstats[count++].value = val;
3134         }
3135
3136         if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
3137                 return count;
3138
3139         /* per-rxq stats */
3140         for (q = 0; q < nb_rxqs; q++) {
3141                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
3142                         stats_ptr = RTE_PTR_ADD(&eth_stats,
3143                                         eth_dev_rxq_stats_strings[i].offset +
3144                                         q * sizeof(uint64_t));
3145                         val = *stats_ptr;
3146                         xstats[count++].value = val;
3147                 }
3148         }
3149
3150         /* per-txq stats */
3151         for (q = 0; q < nb_txqs; q++) {
3152                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
3153                         stats_ptr = RTE_PTR_ADD(&eth_stats,
3154                                         eth_dev_txq_stats_strings[i].offset +
3155                                         q * sizeof(uint64_t));
3156                         val = *stats_ptr;
3157                         xstats[count++].value = val;
3158                 }
3159         }
3160         return count;
3161 }
3162
3163 /* retrieve ethdev extended statistics */
3164 int
3165 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
3166                          uint64_t *values, unsigned int size)
3167 {
3168         unsigned int no_basic_stat_requested = 1;
3169         unsigned int no_ext_stat_requested = 1;
3170         unsigned int num_xstats_filled;
3171         unsigned int basic_count;
3172         uint16_t expected_entries;
3173         struct rte_eth_dev *dev;
3174         unsigned int i;
3175         int ret;
3176
3177         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3178         dev = &rte_eth_devices[port_id];
3179
3180         ret = eth_dev_get_xstats_count(port_id);
3181         if (ret < 0)
3182                 return ret;
3183         expected_entries = (uint16_t)ret;
3184         struct rte_eth_xstat xstats[expected_entries];
3185         basic_count = eth_dev_get_xstats_basic_count(dev);
3186
3187         /* Return max number of stats if no ids given */
3188         if (!ids) {
3189                 if (!values)
3190                         return expected_entries;
3191                 else if (size < expected_entries)
3192                         return expected_entries;
3193         }
3194
3195         if (ids && !values)
3196                 return -EINVAL;
3197
3198         if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
3200                 uint64_t ids_copy[size];
3201
3202                 for (i = 0; i < size; i++) {
3203                         if (ids[i] < basic_count) {
3204                                 no_basic_stat_requested = 0;
3205                                 break;
3206                         }
3207
3208                         /*
3209                          * Convert ids to xstats ids that PMD knows.
3210                          * ids known by user are basic + extended stats.
3211                          */
3212                         ids_copy[i] = ids[i] - basic_count;
3213                 }
3214
3215                 if (no_basic_stat_requested)
3216                         return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
3217                                         values, size);
3218         }
3219
3220         if (ids) {
3221                 for (i = 0; i < size; i++) {
3222                         if (ids[i] >= basic_count) {
3223                                 no_ext_stat_requested = 0;
3224                                 break;
3225                         }
3226                 }
3227         }
3228
3229         /* Fill the xstats structure */
3230         if (ids && no_ext_stat_requested)
3231                 ret = eth_basic_stats_get(port_id, xstats);
3232         else
3233                 ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
3234
3235         if (ret < 0)
3236                 return ret;
3237         num_xstats_filled = (unsigned int)ret;
3238
3239         /* Return all stats */
3240         if (!ids) {
3241                 for (i = 0; i < num_xstats_filled; i++)
3242                         values[i] = xstats[i].value;
3243                 return expected_entries;
3244         }
3245
3246         /* Filter stats */
3247         for (i = 0; i < size; i++) {
3248                 if (ids[i] >= expected_entries) {
3249                         RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
3250                         return -1;
3251                 }
3252                 values[i] = xstats[ids[i]].value;
3253         }
3254         return size;
3255 }
3256
3257 int
3258 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
3259         unsigned int n)
3260 {
3261         struct rte_eth_dev *dev;
3262         unsigned int count = 0, i;
3263         signed int xcount = 0;
3264         uint16_t nb_rxqs, nb_txqs;
3265         int ret;
3266
3267         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3268         dev = &rte_eth_devices[port_id];
3269
3270         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3271         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3272
3273         /* Return generic statistics */
3274         count = RTE_NB_STATS;
3275         if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS)
3276                 count += (nb_rxqs * RTE_NB_RXQ_STATS) + (nb_txqs * RTE_NB_TXQ_STATS);
3277
3278         /* implemented by the driver */
3279         if (dev->dev_ops->xstats_get != NULL) {
3280                 /* Retrieve the xstats from the driver at the end of the
3281                  * xstats struct.
3282                  */
3283                 xcount = (*dev->dev_ops->xstats_get)(dev,
3284                                      xstats ? xstats + count : NULL,
3285                                      (n > count) ? n - count : 0);
3286
3287                 if (xcount < 0)
3288                         return eth_err(port_id, xcount);
3289         }
3290
3291         if (n < count + xcount || xstats == NULL)
3292                 return count + xcount;
3293
3294         /* now fill the xstats structure */
3295         ret = eth_basic_stats_get(port_id, xstats);
3296         if (ret < 0)
3297                 return ret;
3298         count = ret;
3299
3300         for (i = 0; i < count; i++)
3301                 xstats[i].id = i;
3302         /* add an offset to driver-specific stats */
3303         for ( ; i < count + xcount; i++)
3304                 xstats[i].id += count;
3305
3306         return count + xcount;
3307 }
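
/*
 * Editor's illustrative sketch, not part of the library: the usual
 * two-call pattern for dumping all extended statistics. The first calls
 * pass NULL/0 to learn the required array size, as allowed above.
 */
static __rte_unused int
example_dump_all_xstats(uint16_t port_id)
{
	struct rte_eth_xstat_name *names = NULL;
	struct rte_eth_xstat *values = NULL;
	int i, len, ret = 0;

	len = rte_eth_xstats_get(port_id, NULL, 0);
	if (len <= 0)
		return len;

	names = calloc(len, sizeof(*names));
	values = calloc(len, sizeof(*values));
	if (names == NULL || values == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	if (rte_eth_xstats_get_names(port_id, names, len) != len ||
			rte_eth_xstats_get(port_id, values, len) != len) {
		ret = -EAGAIN; /* the set of statistics changed in between */
		goto out;
	}

	/* values[i].id indexes into the name table filled above */
	for (i = 0; i < len; i++)
		RTE_ETHDEV_LOG(INFO, "%s: %" PRIu64 "\n",
			names[values[i].id].name, values[i].value);
out:
	free(names);
	free(values);
	return ret;
}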
3308
3309 /* reset ethdev extended statistics */
3310 int
3311 rte_eth_xstats_reset(uint16_t port_id)
3312 {
3313         struct rte_eth_dev *dev;
3314
3315         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3316         dev = &rte_eth_devices[port_id];
3317
3318         /* implemented by the driver */
3319         if (dev->dev_ops->xstats_reset != NULL)
3320                 return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev));
3321
3322         /* fallback to default */
3323         return rte_eth_stats_reset(port_id);
3324 }
3325
3326 static int
3327 eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id,
3328                 uint8_t stat_idx, uint8_t is_rx)
3329 {
3330         struct rte_eth_dev *dev;
3331
3332         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3333         dev = &rte_eth_devices[port_id];
3334
3335         if (is_rx && (queue_id >= dev->data->nb_rx_queues))
3336                 return -EINVAL;
3337
3338         if (!is_rx && (queue_id >= dev->data->nb_tx_queues))
3339                 return -EINVAL;
3340
3341         if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
3342                 return -EINVAL;
3343
3344         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
3345         return (*dev->dev_ops->queue_stats_mapping_set)(dev, queue_id, stat_idx, is_rx);
3346 }
3347
3348 int
3349 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
3350                 uint8_t stat_idx)
3351 {
3352         return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id,
3353                                                 tx_queue_id,
3354                                                 stat_idx, STAT_QMAP_TX));
3355 }
3356
3357 int
3358 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
3359                 uint8_t stat_idx)
3360 {
3361         return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id,
3362                                                 rx_queue_id,
3363                                                 stat_idx, STAT_QMAP_RX));
3364 }
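
/*
 * Editor's illustrative sketch, not part of the library: on NICs with a
 * small pool of hardware queue counters, map Rx queue q to counter q so
 * that stats.q_ipackets[q] tracks that queue. Drivers without the
 * queue_stats_mapping_set op return -ENOTSUP, which callers should expect.
 */
static __rte_unused int
example_map_rxq_counters(uint16_t port_id, uint16_t nb_rxq)
{
	uint16_t q;
	int ret;

	for (q = 0; q < nb_rxq && q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++) {
		ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, q,
				(uint8_t)q);
		if (ret != 0)
			return ret;
	}

	return 0;
}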
3365
3366 int
3367 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
3368 {
3369         struct rte_eth_dev *dev;
3370
3371         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3372         dev = &rte_eth_devices[port_id];
3373
3374         if (fw_version == NULL && fw_size > 0) {
3375                 RTE_ETHDEV_LOG(ERR,
3376                         "Cannot get ethdev port %u FW version to NULL when string size is non zero\n",
3377                         port_id);
3378                 return -EINVAL;
3379         }
3380
3381         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
3382         return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
3383                                                         fw_version, fw_size));
3384 }
3385
3386 int
3387 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
3388 {
3389         struct rte_eth_dev *dev;
3390         const struct rte_eth_desc_lim lim = {
3391                 .nb_max = UINT16_MAX,
3392                 .nb_min = 0,
3393                 .nb_align = 1,
3394                 .nb_seg_max = UINT16_MAX,
3395                 .nb_mtu_seg_max = UINT16_MAX,
3396         };
3397         int diag;
3398
3399         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3400         dev = &rte_eth_devices[port_id];
3401
3402         if (dev_info == NULL) {
3403                 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u info to NULL\n",
3404                         port_id);
3405                 return -EINVAL;
3406         }
3407
3408         /*
3409          * Init dev_info before the driver callback so that, even if the
3410          * driver fails or leaves fields untouched, the caller always
3411          * gets well-defined (zeroed) contents.
3411          */
3412         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3413         dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
3414
3415         dev_info->rx_desc_lim = lim;
3416         dev_info->tx_desc_lim = lim;
3417         dev_info->device = dev->device;
3418         dev_info->min_mtu = RTE_ETHER_MIN_MTU;
3419         dev_info->max_mtu = UINT16_MAX;
3420
3421         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
3422         diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info);
3423         if (diag != 0) {
3424                 /* Cleanup already filled in device information */
3425                 memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3426                 return eth_err(port_id, diag);
3427         }
3428
3429         /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */
3430         dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues,
3431                         RTE_MAX_QUEUES_PER_PORT);
3432         dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues,
3433                         RTE_MAX_QUEUES_PER_PORT);
3434
3435         dev_info->driver_name = dev->device->driver->name;
3436         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
3437         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
3438
3439         dev_info->dev_flags = &dev->data->dev_flags;
3440
3441         return 0;
3442 }
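
/*
 * Editor's illustrative sketch, not part of the library: clamp an
 * application's requested queue counts against the limits reported by
 * rte_eth_dev_info_get() before calling rte_eth_dev_configure().
 */
static __rte_unused int
example_clamp_queue_counts(uint16_t port_id, uint16_t *nb_rxq,
		uint16_t *nb_txq)
{
	struct rte_eth_dev_info dev_info;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	*nb_rxq = RTE_MIN(*nb_rxq, dev_info.max_rx_queues);
	*nb_txq = RTE_MIN(*nb_txq, dev_info.max_tx_queues);

	return 0;
}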
3443
3444 int
3445 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3446                                  uint32_t *ptypes, int num)
3447 {
3448         int i, j;
3449         struct rte_eth_dev *dev;
3450         const uint32_t *all_ptypes;
3451
3452         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3453         dev = &rte_eth_devices[port_id];
3454
3455         if (ptypes == NULL && num > 0) {
3456                 RTE_ETHDEV_LOG(ERR,
3457                         "Cannot get ethdev port %u supported packet types to NULL when array size is non zero\n",
3458                         port_id);
3459                 return -EINVAL;
3460         }
3461
3462         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
3463         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3464
3465         if (!all_ptypes)
3466                 return 0;
3467
3468         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
3469                 if (all_ptypes[i] & ptype_mask) {
3470                         if (j < num)
3471                                 ptypes[j] = all_ptypes[i];
3472                         j++;
3473                 }
3474
3475         return j;
3476 }
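
/*
 * Editor's illustrative sketch, not part of the library: the two-call
 * pattern for the function above, sizing the array with a NULL buffer
 * first and then fetching the supported L4 packet types.
 */
static __rte_unused int
example_get_l4_ptypes(uint16_t port_id)
{
	uint32_t *ptypes;
	int num, ret;

	num = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_L4_MASK,
			NULL, 0);
	if (num <= 0)
		return num;

	ptypes = calloc(num, sizeof(*ptypes));
	if (ptypes == NULL)
		return -ENOMEM;

	ret = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_L4_MASK,
			ptypes, num);
	/* ptypes[0..ret-1] now holds e.g. RTE_PTYPE_L4_TCP, RTE_PTYPE_L4_UDP */
	free(ptypes);

	return ret;
}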
3477
3478 int
3479 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3480                                  uint32_t *set_ptypes, unsigned int num)
3481 {
3482         const uint32_t valid_ptype_masks[] = {
3483                 RTE_PTYPE_L2_MASK,
3484                 RTE_PTYPE_L3_MASK,
3485                 RTE_PTYPE_L4_MASK,
3486                 RTE_PTYPE_TUNNEL_MASK,
3487                 RTE_PTYPE_INNER_L2_MASK,
3488                 RTE_PTYPE_INNER_L3_MASK,
3489                 RTE_PTYPE_INNER_L4_MASK,
3490         };
3491         const uint32_t *all_ptypes;
3492         struct rte_eth_dev *dev;
3493         uint32_t unused_mask;
3494         unsigned int i, j;
3495         int ret;
3496
3497         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3498         dev = &rte_eth_devices[port_id];
3499
3500         if (num > 0 && set_ptypes == NULL) {
3501                 RTE_ETHDEV_LOG(ERR,
3502                         "Cannot get ethdev port %u set packet types to NULL when array size is non zero\n",
3503                         port_id);
3504                 return -EINVAL;
3505         }
3506
3507         if (*dev->dev_ops->dev_supported_ptypes_get == NULL ||
3508                         *dev->dev_ops->dev_ptypes_set == NULL) {
3509                 ret = 0;
3510                 goto ptype_unknown;
3511         }
3512
3513         if (ptype_mask == 0) {
3514                 ret = (*dev->dev_ops->dev_ptypes_set)(dev,
3515                                 ptype_mask);
3516                 goto ptype_unknown;
3517         }
3518
3519         unused_mask = ptype_mask;
3520         for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) {
3521                 uint32_t mask = ptype_mask & valid_ptype_masks[i];
3522                 if (mask && mask != valid_ptype_masks[i]) {
3523                         ret = -EINVAL;
3524                         goto ptype_unknown;
3525                 }
3526                 unused_mask &= ~valid_ptype_masks[i];
3527         }
3528
3529         if (unused_mask) {
3530                 ret = -EINVAL;
3531                 goto ptype_unknown;
3532         }
3533
3534         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3535         if (all_ptypes == NULL) {
3536                 ret = 0;
3537                 goto ptype_unknown;
3538         }
3539
3540         /*
3541          * Accommodate as many set_ptypes as possible. If the supplied
3542          * set_ptypes array is insufficient fill it partially.
3543          */
3544         for (i = 0, j = 0; set_ptypes != NULL &&
3545                                 (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) {
3546                 if (ptype_mask & all_ptypes[i]) {
3547                         if (j + 1 < num) {
3548                                 set_ptypes[j] = all_ptypes[i];
3549                                 j++;
3550                                 continue;
3551                         }
3552                         break;
3553                 }
3554         }
3555
3556         if (set_ptypes != NULL && j < num)
3557                 set_ptypes[j] = RTE_PTYPE_UNKNOWN;
3558
3559         return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask);
3560
3561 ptype_unknown:
3562         if (num > 0)
3563                 set_ptypes[0] = RTE_PTYPE_UNKNOWN;
3564
3565         return ret;
3566 }
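
/*
 * Editor's illustrative sketch, not part of the library: restrict packet
 * type parsing to the L3 and L4 layers, which lets some drivers skip
 * classification work on the other layers. The function itself appends
 * the terminating RTE_PTYPE_UNKNOWN entry when room is left.
 */
static __rte_unused int
example_restrict_ptypes(uint16_t port_id)
{
	uint32_t set_ptypes[16];

	return rte_eth_dev_set_ptypes(port_id,
			RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK,
			set_ptypes, RTE_DIM(set_ptypes));
}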
3567
3568 int
3569 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
3570 {
3571         struct rte_eth_dev *dev;
3572
3573         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3574         dev = &rte_eth_devices[port_id];
3575
3576         if (mac_addr == NULL) {
3577                 RTE_ETHDEV_LOG(ERR,
3578                         "Cannot get ethdev port %u MAC address to NULL\n",
3579                         port_id);
3580                 return -EINVAL;
3581         }
3582
3583         rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
3584
3585         return 0;
3586 }
3587
3588 int
3589 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
3590 {
3591         struct rte_eth_dev *dev;
3592
3593         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3594         dev = &rte_eth_devices[port_id];
3595
3596         if (mtu == NULL) {
3597                 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u MTU to NULL\n",
3598                         port_id);
3599                 return -EINVAL;
3600         }
3601
3602         *mtu = dev->data->mtu;
3603         return 0;
3604 }
3605
3606 int
3607 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
3608 {
3609         int ret;
3610         struct rte_eth_dev_info dev_info;
3611         struct rte_eth_dev *dev;
3612
3613         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3614         dev = &rte_eth_devices[port_id];
3615         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
3616
3617         /*
3618          * Check if the device supports dev_infos_get, if it does not
3619          * skip min_mtu/max_mtu validation here as this requires values
3620          * that are populated within the call to rte_eth_dev_info_get()
3621          * which relies on dev->dev_ops->dev_infos_get.
3622          */
3623         if (*dev->dev_ops->dev_infos_get != NULL) {
3624                 ret = rte_eth_dev_info_get(port_id, &dev_info);
3625                 if (ret != 0)
3626                         return ret;
3627
3628                 if (mtu < dev_info.min_mtu || mtu > dev_info.max_mtu)
3629                         return -EINVAL;
3630         }
3631
3632         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
3633         if (!ret)
3634                 dev->data->mtu = mtu;
3635
3636         return eth_err(port_id, ret);
3637 }
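
/*
 * Editor's illustrative sketch, not part of the library: validate a new
 * MTU against the device bounds, mirroring the min_mtu/max_mtu check the
 * function above performs when dev_infos_get is available.
 */
static __rte_unused int
example_set_checked_mtu(uint16_t port_id, uint16_t new_mtu)
{
	struct rte_eth_dev_info dev_info;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	if (new_mtu < dev_info.min_mtu || new_mtu > dev_info.max_mtu)
		return -EINVAL;

	return rte_eth_dev_set_mtu(port_id, new_mtu);
}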
3638
3639 int
3640 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
3641 {
3642         struct rte_eth_dev *dev;
3643         int ret;
3644
3645         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3646         dev = &rte_eth_devices[port_id];
3647
3648         if (!(dev->data->dev_conf.rxmode.offloads &
3649               DEV_RX_OFFLOAD_VLAN_FILTER)) {
3650                 RTE_ETHDEV_LOG(ERR, "Port %u: vlan-filtering disabled\n",
3651                         port_id);
3652                 return -ENOSYS;
3653         }
3654
3655         if (vlan_id > 4095) {
3656                 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n",
3657                         port_id, vlan_id);
3658                 return -EINVAL;
3659         }
3660         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
3661
3662         ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
3663         if (ret == 0) {
3664                 struct rte_vlan_filter_conf *vfc;
3665                 int vidx;
3666                 int vbit;
3667
3668                 vfc = &dev->data->vlan_filter_conf;
3669                 vidx = vlan_id / 64;
3670                 vbit = vlan_id % 64;
3671
3672                 if (on)
3673                         vfc->ids[vidx] |= UINT64_C(1) << vbit;
3674                 else
3675                         vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
3676         }
3677
3678         return eth_err(port_id, ret);
3679 }
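
/*
 * Editor's illustrative sketch, not part of the library: admit traffic
 * from one VLAN ID. VLAN filtering must already be an active Rx offload,
 * otherwise the check at the top of rte_eth_dev_vlan_filter() rejects
 * the call, so it is enabled here first if needed.
 */
static __rte_unused int
example_allow_vlan(uint16_t port_id, uint16_t vlan_id)
{
	int mask, ret;

	mask = rte_eth_dev_get_vlan_offload(port_id);
	if (mask < 0)
		return mask;

	if (!(mask & ETH_VLAN_FILTER_OFFLOAD)) {
		ret = rte_eth_dev_set_vlan_offload(port_id,
				mask | ETH_VLAN_FILTER_OFFLOAD);
		if (ret != 0)
			return ret;
	}

	return rte_eth_dev_vlan_filter(port_id, vlan_id, 1);
}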
3680
3681 int
3682 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3683                                     int on)
3684 {
3685         struct rte_eth_dev *dev;
3686
3687         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3688         dev = &rte_eth_devices[port_id];
3689
3690         if (rx_queue_id >= dev->data->nb_rx_queues) {
3691                 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id);
3692                 return -EINVAL;
3693         }
3694
3695         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
3696         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
3697
3698         return 0;
3699 }
3700
3701 int
3702 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
3703                                 enum rte_vlan_type vlan_type,
3704                                 uint16_t tpid)
3705 {
3706         struct rte_eth_dev *dev;
3707
3708         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3709         dev = &rte_eth_devices[port_id];
3710
3711         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
3712         return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
3713                                                                tpid));
3714 }
3715
3716 int
3717 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
3718 {
3719         struct rte_eth_dev_info dev_info;
3720         struct rte_eth_dev *dev;
3721         int ret = 0;
3722         int mask = 0;
3723         int cur, org = 0;
3724         uint64_t orig_offloads;
3725         uint64_t dev_offloads;
3726         uint64_t new_offloads;
3727
3728         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3729         dev = &rte_eth_devices[port_id];
3730
3731         /* save original values in case of failure */
3732         orig_offloads = dev->data->dev_conf.rxmode.offloads;
3733         dev_offloads = orig_offloads;
3734
3735         /* check which option changed by application */
3736         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
3737         org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
3738         if (cur != org) {
3739                 if (cur)
3740                         dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
3741                 else
3742                         dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
3743                 mask |= ETH_VLAN_STRIP_MASK;
3744         }
3745
3746         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
3747         org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
3748         if (cur != org) {
3749                 if (cur)
3750                         dev_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3751                 else
3752                         dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
3753                 mask |= ETH_VLAN_FILTER_MASK;
3754         }
3755
3756         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
3757         org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND);
3758         if (cur != org) {
3759                 if (cur)
3760                         dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
3761                 else
3762                         dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
3763                 mask |= ETH_VLAN_EXTEND_MASK;
3764         }
3765
3766         cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD);
3767         org = !!(dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP);
3768         if (cur != org) {
3769                 if (cur)
3770                         dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
3771                 else
3772                         dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
3773                 mask |= ETH_QINQ_STRIP_MASK;
3774         }
3775
3776         /* no change */
3777         if (mask == 0)
3778                 return ret;
3779
3780         ret = rte_eth_dev_info_get(port_id, &dev_info);
3781         if (ret != 0)
3782                 return ret;
3783
3784         /* Rx VLAN offloading must be within its device capabilities */
3785         if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) {
3786                 new_offloads = dev_offloads & ~orig_offloads;
3787                 RTE_ETHDEV_LOG(ERR,
3788                         "Ethdev port_id=%u newly requested VLAN offloads "
3789                         "0x%" PRIx64 " must be within Rx offload capabilities "
3790                         "0x%" PRIx64 " in %s()\n",
3791                         port_id, new_offloads, dev_info.rx_offload_capa,
3792                         __func__);
3793                 return -EINVAL;
3794         }
3795
3796         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
3797         dev->data->dev_conf.rxmode.offloads = dev_offloads;
3798         ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
3799         if (ret) {
3800                 /* hit an error, restore the original values */
3801                 dev->data->dev_conf.rxmode.offloads = orig_offloads;
3802         }
3803
3804         return eth_err(port_id, ret);
3805 }
3806
3807 int
3808 rte_eth_dev_get_vlan_offload(uint16_t port_id)
3809 {
3810         struct rte_eth_dev *dev;
3811         uint64_t *dev_offloads;
3812         int ret = 0;
3813
3814         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3815         dev = &rte_eth_devices[port_id];
3816         dev_offloads = &dev->data->dev_conf.rxmode.offloads;
3817
3818         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
3819                 ret |= ETH_VLAN_STRIP_OFFLOAD;
3820
3821         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
3822                 ret |= ETH_VLAN_FILTER_OFFLOAD;
3823
3824         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
3825                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
3826
3827         if (*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
3828                 ret |= ETH_QINQ_STRIP_OFFLOAD;
3829
3830         return ret;
3831 }
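
/*
 * Editor's illustrative sketch, not part of the library: the intended
 * read-modify-write usage of the two functions above, toggling Rx VLAN
 * stripping while preserving the remaining offload bits.
 */
static __rte_unused int
example_toggle_vlan_strip(uint16_t port_id, int on)
{
	int mask;

	mask = rte_eth_dev_get_vlan_offload(port_id);
	if (mask < 0)
		return mask;

	if (on)
		mask |= ETH_VLAN_STRIP_OFFLOAD;
	else
		mask &= ~ETH_VLAN_STRIP_OFFLOAD;

	return rte_eth_dev_set_vlan_offload(port_id, mask);
}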
3832
3833 int
3834 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
3835 {
3836         struct rte_eth_dev *dev;
3837
3838         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3839         dev = &rte_eth_devices[port_id];
3840
3841         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
3842         return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
3843 }
3844
3845 int
3846 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3847 {
3848         struct rte_eth_dev *dev;
3849
3850         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3851         dev = &rte_eth_devices[port_id];
3852
3853         if (fc_conf == NULL) {
3854                 RTE_ETHDEV_LOG(ERR,
3855                         "Cannot get ethdev port %u flow control config to NULL\n",
3856                         port_id);
3857                 return -EINVAL;
3858         }
3859
3860         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
3861         memset(fc_conf, 0, sizeof(*fc_conf));
3862         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
3863 }
3864
3865 int
3866 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3867 {
3868         struct rte_eth_dev *dev;
3869
3870         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3871         dev = &rte_eth_devices[port_id];
3872
3873         if (fc_conf == NULL) {
3874                 RTE_ETHDEV_LOG(ERR,
3875                         "Cannot set ethdev port %u flow control from NULL config\n",
3876                         port_id);
3877                 return -EINVAL;
3878         }
3879
3880         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
3881                 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n");
3882                 return -EINVAL;
3883         }
3884
3885         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
3886         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
3887 }
3888
3889 int
3890 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
3891                                    struct rte_eth_pfc_conf *pfc_conf)
3892 {
3893         struct rte_eth_dev *dev;
3894
3895         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3896         dev = &rte_eth_devices[port_id];
3897
3898         if (pfc_conf == NULL) {
3899                 RTE_ETHDEV_LOG(ERR,
3900                         "Cannot set ethdev port %u priority flow control from NULL config\n",
3901                         port_id);
3902                 return -EINVAL;
3903         }
3904
3905         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
3906                 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
3907                 return -EINVAL;
3908         }
3909
3910         /* High water and low water validation is device-specific */
3911         if (*dev->dev_ops->priority_flow_ctrl_set)
3912                 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
3913                                         (dev, pfc_conf));
3914         return -ENOTSUP;
3915 }
3916
3917 static int
3918 eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
3919                         uint16_t reta_size)
3920 {
3921         uint16_t i, num;
3922
3923         num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
3924         for (i = 0; i < num; i++) {
3925                 if (reta_conf[i].mask)
3926                         return 0;
3927         }
3928
3929         return -EINVAL;
3930 }
3931
3932 static int
3933 eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
3934                          uint16_t reta_size,
3935                          uint16_t max_rxq)
3936 {
3937         uint16_t i, idx, shift;
3938
3939         if (max_rxq == 0) {
3940                 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n");
3941                 return -EINVAL;
3942         }
3943
3944         for (i = 0; i < reta_size; i++) {
3945                 idx = i / RTE_RETA_GROUP_SIZE;
3946                 shift = i % RTE_RETA_GROUP_SIZE;
3947                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
3948                         (reta_conf[idx].reta[shift] >= max_rxq)) {
3949                         RTE_ETHDEV_LOG(ERR,
3950                                 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n",
3951                                 idx, shift,
3952                                 reta_conf[idx].reta[shift], max_rxq);
3953                         return -EINVAL;
3954                 }
3955         }
3956
3957         return 0;
3958 }
3959
3960 int
3961 rte_eth_dev_rss_reta_update(uint16_t port_id,
3962                             struct rte_eth_rss_reta_entry64 *reta_conf,
3963                             uint16_t reta_size)
3964 {
3965         struct rte_eth_dev *dev;
3966         int ret;
3967
3968         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3969         dev = &rte_eth_devices[port_id];
3970
3971         if (reta_conf == NULL) {
3972                 RTE_ETHDEV_LOG(ERR,
3973                         "Cannot update ethdev port %u RSS RETA to NULL\n",
3974                         port_id);
3975                 return -EINVAL;
3976         }
3977
3978         if (reta_size == 0) {
3979                 RTE_ETHDEV_LOG(ERR,
3980                         "Cannot update ethdev port %u RSS RETA with zero size\n",
3981                         port_id);
3982                 return -EINVAL;
3983         }
3984
3985         /* Check mask bits */
3986         ret = eth_check_reta_mask(reta_conf, reta_size);
3987         if (ret < 0)
3988                 return ret;
3989
3990         /* Check entry value */
3991         ret = eth_check_reta_entry(reta_conf, reta_size,
3992                                 dev->data->nb_rx_queues);
3993         if (ret < 0)
3994                 return ret;
3995
3996         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
3997         return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
3998                                                              reta_size));
3999 }
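
/*
 * Editor's illustrative sketch, not part of the library: build a
 * redirection table that spreads flows round-robin over nb_queues Rx
 * queues. The idx/shift arithmetic matches eth_check_reta_entry() above.
 * Assumes reta_size (from dev_info.reta_size) is a non-zero multiple of
 * RTE_RETA_GROUP_SIZE and nb_queues > 0.
 */
static __rte_unused int
example_reta_round_robin(uint16_t port_id, uint16_t reta_size,
		uint16_t nb_queues)
{
	struct rte_eth_rss_reta_entry64 reta_conf[reta_size /
			RTE_RETA_GROUP_SIZE];
	uint16_t i;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < reta_size; i++) {
		uint16_t idx = i / RTE_RETA_GROUP_SIZE;
		uint16_t shift = i % RTE_RETA_GROUP_SIZE;

		reta_conf[idx].mask |= UINT64_C(1) << shift;
		reta_conf[idx].reta[shift] = i % nb_queues;
	}

	return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
}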
4000
4001 int
4002 rte_eth_dev_rss_reta_query(uint16_t port_id,
4003                            struct rte_eth_rss_reta_entry64 *reta_conf,
4004                            uint16_t reta_size)
4005 {
4006         struct rte_eth_dev *dev;
4007         int ret;
4008
4009         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4010         dev = &rte_eth_devices[port_id];
4011
4012         if (reta_conf == NULL) {
4013                 RTE_ETHDEV_LOG(ERR,
4014                         "Cannot query ethdev port %u RSS RETA from NULL config\n",
4015                         port_id);
4016                 return -EINVAL;
4017         }
4018
4019         /* Check mask bits */
4020         ret = eth_check_reta_mask(reta_conf, reta_size);
4021         if (ret < 0)
4022                 return ret;
4023
4024         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
4025         return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
4026                                                             reta_size));
4027 }
4028
4029 int
4030 rte_eth_dev_rss_hash_update(uint16_t port_id,
4031                             struct rte_eth_rss_conf *rss_conf)
4032 {
4033         struct rte_eth_dev *dev;
4034         struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
4035         int ret;
4036
4037         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4038         dev = &rte_eth_devices[port_id];
4039
4040         if (rss_conf == NULL) {
4041                 RTE_ETHDEV_LOG(ERR,
4042                         "Cannot update ethdev port %u RSS hash from NULL config\n",
4043                         port_id);
4044                 return -EINVAL;
4045         }
4046
4047         ret = rte_eth_dev_info_get(port_id, &dev_info);
4048         if (ret != 0)
4049                 return ret;
4050
4051         rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf);
4052         if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
4053             dev_info.flow_type_rss_offloads) {
4054                 RTE_ETHDEV_LOG(ERR,
4055                         "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
4056                         port_id, rss_conf->rss_hf,
4057                         dev_info.flow_type_rss_offloads);
4058                 return -EINVAL;
4059         }
4060         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
4061         return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
4062                                                                  rss_conf));
4063 }
4064
4065 int
4066 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
4067                               struct rte_eth_rss_conf *rss_conf)
4068 {
4069         struct rte_eth_dev *dev;
4070
4071         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4072         dev = &rte_eth_devices[port_id];
4073
4074         if (rss_conf == NULL) {
4075                 RTE_ETHDEV_LOG(ERR,
4076                         "Cannot get ethdev port %u RSS hash config to NULL\n",
4077                         port_id);
4078                 return -EINVAL;
4079         }
4080
4081         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
4082         return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
4083                                                                    rss_conf));
4084 }
4085
4086 int
4087 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
4088                                 struct rte_eth_udp_tunnel *udp_tunnel)
4089 {
4090         struct rte_eth_dev *dev;
4091
4092         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4093         dev = &rte_eth_devices[port_id];
4094
4095         if (udp_tunnel == NULL) {
4096                 RTE_ETHDEV_LOG(ERR,
4097                         "Cannot add ethdev port %u UDP tunnel port from NULL UDP tunnel\n",
4098                         port_id);
4099                 return -EINVAL;
4100         }
4101
4102         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
4103                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4104                 return -EINVAL;
4105         }
4106
4107         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
4108         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
4109                                                                 udp_tunnel));
4110 }
4111
4112 int
4113 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
4114                                    struct rte_eth_udp_tunnel *udp_tunnel)
4115 {
4116         struct rte_eth_dev *dev;
4117
4118         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4119         dev = &rte_eth_devices[port_id];
4120
4121         if (udp_tunnel == NULL) {
4122                 RTE_ETHDEV_LOG(ERR,
4123                         "Cannot delete ethdev port %u UDP tunnel port from NULL UDP tunnel\n",
4124                         port_id);
4125                 return -EINVAL;
4126         }
4127
4128         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
4129                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4130                 return -EINVAL;
4131         }
4132
4133         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
4134         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
4135                                                                 udp_tunnel));
4136 }
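
/*
 * Editor's illustrative sketch, not part of the library: register the
 * IANA-assigned VXLAN port (4789) so the NIC parses packets received on
 * that UDP port as VXLAN tunnels.
 */
static __rte_unused int
example_add_vxlan_port(uint16_t port_id)
{
	struct rte_eth_udp_tunnel tunnel = {
		.udp_port = 4789,
		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
	};

	return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
}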
4137
4138 int
4139 rte_eth_led_on(uint16_t port_id)
4140 {
4141         struct rte_eth_dev *dev;
4142
4143         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4144         dev = &rte_eth_devices[port_id];
4145
4146         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
4147         return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
4148 }
4149
4150 int
4151 rte_eth_led_off(uint16_t port_id)
4152 {
4153         struct rte_eth_dev *dev;
4154
4155         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4156         dev = &rte_eth_devices[port_id];
4157
4158         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
4159         return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
4160 }
4161
4162 int
4163 rte_eth_fec_get_capability(uint16_t port_id,
4164                            struct rte_eth_fec_capa *speed_fec_capa,
4165                            unsigned int num)
4166 {
4167         struct rte_eth_dev *dev;
4168         int ret;
4169
4170         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4171         dev = &rte_eth_devices[port_id];
4172
4173         if (speed_fec_capa == NULL && num > 0) {
4174                 RTE_ETHDEV_LOG(ERR,
4175                         "Cannot get ethdev port %u FEC capability to NULL when array size is non zero\n",
4176                         port_id);
4177                 return -EINVAL;
4178         }
4179
4180         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get_capability, -ENOTSUP);
4181         ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num);
4182
4183         return ret;
4184 }
4185
4186 int
4187 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa)
4188 {
4189         struct rte_eth_dev *dev;
4190
4191         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4192         dev = &rte_eth_devices[port_id];
4193
4194         if (fec_capa == NULL) {
4195                 RTE_ETHDEV_LOG(ERR,
4196                         "Cannot get ethdev port %u current FEC mode to NULL\n",
4197                         port_id);
4198                 return -EINVAL;
4199         }
4200
4201         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get, -ENOTSUP);
4202         return eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa));
4203 }
4204
4205 int
4206 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa)
4207 {
4208         struct rte_eth_dev *dev;
4209
4210         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4211         dev = &rte_eth_devices[port_id];
4212
4213         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_set, -ENOTSUP);
4214         return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa));
4215 }
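
/*
 * Editor's illustrative sketch, not part of the library: size and fetch
 * the per-speed FEC capabilities, then request Reed-Solomon FEC with
 * automatic negotiation as a fallback. A real caller would first check
 * capa[] for RTE_ETH_FEC_MODE_CAPA_MASK(RS) at the current link speed.
 */
static __rte_unused int
example_enable_rs_fec(uint16_t port_id)
{
	struct rte_eth_fec_capa *capa;
	int num, ret;

	num = rte_eth_fec_get_capability(port_id, NULL, 0);
	if (num <= 0)
		return num;

	capa = calloc(num, sizeof(*capa));
	if (capa == NULL)
		return -ENOMEM;

	ret = rte_eth_fec_get_capability(port_id, capa, num);
	free(capa);
	if (ret < 0)
		return ret;

	return rte_eth_fec_set(port_id,
			RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
			RTE_ETH_FEC_MODE_CAPA_MASK(RS));
}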
4216
4217 /*
4218  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
4219  * an empty spot.
4220  */
4221 static int
4222 eth_dev_get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
4223 {
4224         struct rte_eth_dev_info dev_info;
4225         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4226         unsigned i;
4227         int ret;
4228
4229         ret = rte_eth_dev_info_get(port_id, &dev_info);
4230         if (ret != 0)
4231                 return -1;
4232
4233         for (i = 0; i < dev_info.max_mac_addrs; i++)
4234                 if (memcmp(addr, &dev->data->mac_addrs[i],
4235                                 RTE_ETHER_ADDR_LEN) == 0)
4236                         return i;
4237
4238         return -1;
4239 }
4240
4241 static const struct rte_ether_addr null_mac_addr;
4242
4243 int
4244 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
4245                         uint32_t pool)
4246 {
4247         struct rte_eth_dev *dev;
4248         int index;
4249         uint64_t pool_mask;
4250         int ret;
4251
4252         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4253         dev = &rte_eth_devices[port_id];
4254
4255         if (addr == NULL) {
4256                 RTE_ETHDEV_LOG(ERR,
4257                         "Cannot add ethdev port %u MAC address from NULL address\n",
4258                         port_id);
4259                 return -EINVAL;
4260         }
4261
4262         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
4263
4264         if (rte_is_zero_ether_addr(addr)) {
4265                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
4266                         port_id);
4267                 return -EINVAL;
4268         }
4269         if (pool >= ETH_64_POOLS) {
4270                 RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", ETH_64_POOLS - 1);
4271                 return -EINVAL;
4272         }
4273
4274         index = eth_dev_get_mac_addr_index(port_id, addr);
4275         if (index < 0) {
4276                 index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr);
4277                 if (index < 0) {
4278                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
4279                                 port_id);
4280                         return -ENOSPC;
4281                 }
4282         } else {
4283                 pool_mask = dev->data->mac_pool_sel[index];
4284
4285                 /* If both the MAC address and pool are already present, do nothing */
4286                 if (pool_mask & (1ULL << pool))
4287                         return 0;
4288         }
4289
4290         /* Update NIC */
4291         ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
4292
4293         if (ret == 0) {
4294                 /* Update address in NIC data structure */
4295                 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]);
4296
4297                 /* Update pool bitmap in NIC data structure */
4298                 dev->data->mac_pool_sel[index] |= (1ULL << pool);
4299         }
4300
4301         return eth_err(port_id, ret);
4302 }
4303
4304 int
4305 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr)
4306 {
4307         struct rte_eth_dev *dev;
4308         int index;
4309
4310         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4311         dev = &rte_eth_devices[port_id];
4312
4313         if (addr == NULL) {
4314                 RTE_ETHDEV_LOG(ERR,
4315                         "Cannot remove ethdev port %u MAC address from NULL address\n",
4316                         port_id);
4317                 return -EINVAL;
4318         }
4319
4320         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
4321
4322         index = eth_dev_get_mac_addr_index(port_id, addr);
4323         if (index == 0) {
4324                 RTE_ETHDEV_LOG(ERR,
4325                         "Port %u: Cannot remove default MAC address\n",
4326                         port_id);
4327                 return -EADDRINUSE;
4328         } else if (index < 0)
4329                 return 0;  /* Do nothing if address wasn't found */
4330
4331         /* Update NIC */
4332         (*dev->dev_ops->mac_addr_remove)(dev, index);
4333
4334         /* Update address in NIC data structure */
4335         rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
4336
4337         /* reset pool bitmap */
4338         dev->data->mac_pool_sel[index] = 0;
4339
4340         return 0;
4341 }
4342
4343 int
4344 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr)
4345 {
4346         struct rte_eth_dev *dev;
4347         int ret;
4348
4349         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4350         dev = &rte_eth_devices[port_id];
4351
4352         if (addr == NULL) {
4353                 RTE_ETHDEV_LOG(ERR,
4354                         "Cannot set ethdev port %u default MAC address from NULL address\n",
4355                         port_id);
4356                 return -EINVAL;
4357         }
4358
4359         if (!rte_is_valid_assigned_ether_addr(addr))
4360                 return -EINVAL;
4361
4362         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
4363
4364         ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
4365         if (ret < 0)
4366                 return ret;
4367
4368         /* Update default address in NIC data structure */
4369         rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
4370
4371         return 0;
4372 }
4373
4375 /*
4376  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
4377  * an empty spot.
4378  */
4379 static int
4380 eth_dev_get_hash_mac_addr_index(uint16_t port_id,
4381                 const struct rte_ether_addr *addr)
4382 {
4383         struct rte_eth_dev_info dev_info;
4384         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4385         unsigned i;
4386         int ret;
4387
4388         ret = rte_eth_dev_info_get(port_id, &dev_info);
4389         if (ret != 0)
4390                 return -1;
4391
4392         if (!dev->data->hash_mac_addrs)
4393                 return -1;
4394
4395         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
4396                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
4397                         RTE_ETHER_ADDR_LEN) == 0)
4398                         return i;
4399
4400         return -1;
4401 }
4402
4403 int
4404 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
4405                                 uint8_t on)
4406 {
4407         int index;
4408         int ret;
4409         struct rte_eth_dev *dev;
4410
4411         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4412         dev = &rte_eth_devices[port_id];
4413
4414         if (addr == NULL) {
4415                 RTE_ETHDEV_LOG(ERR,
4416                         "Cannot set ethdev port %u unicast hash table from NULL address\n",
4417                         port_id);
4418                 return -EINVAL;
4419         }
4420
4421         if (rte_is_zero_ether_addr(addr)) {
4422                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
4423                         port_id);
4424                 return -EINVAL;
4425         }
4426
4427         index = eth_dev_get_hash_mac_addr_index(port_id, addr);
4428         /* Check if it's already there, and do nothing */
4429         if ((index >= 0) && on)
4430                 return 0;
4431
4432         if (index < 0) {
4433                 if (!on) {
4434                         RTE_ETHDEV_LOG(ERR,
4435                                 "Port %u: the MAC address was not set in UTA\n",
4436                                 port_id);
4437                         return -EINVAL;
4438                 }
4439
4440                 index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr);
4441                 if (index < 0) {
4442                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
4443                                 port_id);
4444                         return -ENOSPC;
4445                 }
4446         }
4447
4448         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
4449         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
4450         if (ret == 0) {
4451                 /* Update address in NIC data structure */
4452                 if (on)
4453                         rte_ether_addr_copy(addr,
4454                                         &dev->data->hash_mac_addrs[index]);
4455                 else
4456                         rte_ether_addr_copy(&null_mac_addr,
4457                                         &dev->data->hash_mac_addrs[index]);
4458         }
4459
4460         return eth_err(port_id, ret);
4461 }
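
/*
 * Usage sketch (illustrative only): toggling an address in the unicast
 * hash table (UTA) on PMDs that implement uc_hash_table_set. Unlike a
 * perfect filter, the hash table can admit more addresses than the MAC
 * array holds, at the cost of possible false positives.
 *
 *	rte_eth_dev_uc_hash_table_set(port_id, &mac, 1);   enable
 *	...
 *	rte_eth_dev_uc_hash_table_set(port_id, &mac, 0);   disable
 */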
4462
4463 int
4464 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
4465 {
4466         struct rte_eth_dev *dev;
4467
4468         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4469         dev = &rte_eth_devices[port_id];
4470
4471         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
4472         return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
4473                                                                        on));
4474 }
4475
4476 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
4477                                         uint16_t tx_rate)
4478 {
4479         struct rte_eth_dev *dev;
4480         struct rte_eth_dev_info dev_info;
4481         struct rte_eth_link link;
4482         int ret;
4483
4484         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4485         dev = &rte_eth_devices[port_id];
4486
4487         ret = rte_eth_dev_info_get(port_id, &dev_info);
4488         if (ret != 0)
4489                 return ret;
4490
4491         link = dev->data->dev_link;
4492
4493         if (queue_idx >= dev_info.max_tx_queues) {
4494                 RTE_ETHDEV_LOG(ERR,
4495                         "Set queue rate limit: port %u: invalid queue_id=%u\n",
4496                         port_id, queue_idx);
4497                 return -EINVAL;
4498         }
4499
4500         if (tx_rate > link.link_speed) {
4501                 RTE_ETHDEV_LOG(ERR,
4502                         "Set queue rate limit: invalid tx_rate=%u, bigger than link speed %u\n",
4503                         tx_rate, link.link_speed);
4504                 return -EINVAL;
4505         }
4506
4507         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
4508         return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
4509                                                         queue_idx, tx_rate));
4510 }
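
/*
 * Usage sketch (illustrative only): capping Tx queue 0 at 100 Mbit/s.
 * tx_rate is in Mbit/s, the same unit as link_speed, which is why it
 * is validated against the current link speed above; the link should
 * therefore be up before calling this.
 *
 *	int rc = rte_eth_set_queue_rate_limit(port_id, 0, 100);
 *	if (rc == -ENOTSUP)
 *		printf("PMD has no per-queue rate limiting\n");
 */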
4511
4512 int
4513 rte_eth_mirror_rule_set(uint16_t port_id,
4514                         struct rte_eth_mirror_conf *mirror_conf,
4515                         uint8_t rule_id, uint8_t on)
4516 {
4517         struct rte_eth_dev *dev;
4518
4519         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4520         dev = &rte_eth_devices[port_id];
4521
4522         if (mirror_conf == NULL) {
4523                 RTE_ETHDEV_LOG(ERR,
4524                         "Cannot set ethdev port %u mirror rule from NULL config\n",
4525                         port_id);
4526                 return -EINVAL;
4527         }
4528
4529         if (mirror_conf->rule_type == 0) {
4530                 RTE_ETHDEV_LOG(ERR, "Mirror rule type cannot be 0\n");
4531                 return -EINVAL;
4532         }
4533
4534         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
4535                 RTE_ETHDEV_LOG(ERR, "Invalid dst pool, pool id must be 0-%d\n",
4536                         ETH_64_POOLS - 1);
4537                 return -EINVAL;
4538         }
4539
4540         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
4541              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
4542             (mirror_conf->pool_mask == 0)) {
4543                 RTE_ETHDEV_LOG(ERR,
4544                         "Invalid mirror pool, pool mask cannot be 0\n");
4545                 return -EINVAL;
4546         }
4547
4548         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
4549             mirror_conf->vlan.vlan_mask == 0) {
4550                 RTE_ETHDEV_LOG(ERR,
4551                         "Invalid vlan mask, vlan mask cannot be 0\n");
4552                 return -EINVAL;
4553         }
4554
4555         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
4556
4557         return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
4558                                                 mirror_conf, rule_id, on));
4559 }
4560
4561 int
4562 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
4563 {
4564         struct rte_eth_dev *dev;
4565
4566         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4567         dev = &rte_eth_devices[port_id];
4568
4569         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
4570         return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev, rule_id));
4571 }
4572
4573 RTE_INIT(eth_dev_init_cb_lists)
4574 {
4575         uint16_t i;
4576
4577         for (i = 0; i < RTE_MAX_ETHPORTS; i++)
4578                 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
4579 }
4580
4581 int
4582 rte_eth_dev_callback_register(uint16_t port_id,
4583                         enum rte_eth_event_type event,
4584                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4585 {
4586         struct rte_eth_dev *dev;
4587         struct rte_eth_dev_callback *user_cb;
4588         uint16_t next_port;
4589         uint16_t last_port;
4590
4591         if (cb_fn == NULL) {
4592                 RTE_ETHDEV_LOG(ERR,
4593                         "Cannot register ethdev port %u callback from NULL\n",
4594                         port_id);
4595                 return -EINVAL;
4596         }
4597
4598         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4599                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id);
4600                 return -EINVAL;
4601         }
4602
4603         if (port_id == RTE_ETH_ALL) {
4604                 next_port = 0;
4605                 last_port = RTE_MAX_ETHPORTS - 1;
4606         } else {
4607                 next_port = last_port = port_id;
4608         }
4609
4610         rte_spinlock_lock(&eth_dev_cb_lock);
4611
4612         do {
4613                 dev = &rte_eth_devices[next_port];
4614
4615                 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
4616                         if (user_cb->cb_fn == cb_fn &&
4617                                 user_cb->cb_arg == cb_arg &&
4618                                 user_cb->event == event) {
4619                                 break;
4620                         }
4621                 }
4622
4623                 /* create a new callback. */
4624                 if (user_cb == NULL) {
4625                         user_cb = rte_zmalloc("INTR_USER_CALLBACK",
4626                                 sizeof(struct rte_eth_dev_callback), 0);
4627                         if (user_cb != NULL) {
4628                                 user_cb->cb_fn = cb_fn;
4629                                 user_cb->cb_arg = cb_arg;
4630                                 user_cb->event = event;
4631                                 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
4632                                                   user_cb, next);
4633                         } else {
4634                                 rte_spinlock_unlock(&eth_dev_cb_lock);
4635                                 rte_eth_dev_callback_unregister(port_id, event,
4636                                                                 cb_fn, cb_arg);
4637                                 return -ENOMEM;
4638                         }
4639
4640                 }
4641         } while (++next_port <= last_port);
4642
4643         rte_spinlock_unlock(&eth_dev_cb_lock);
4644         return 0;
4645 }
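
/*
 * Usage sketch (illustrative only): registering a link-status callback
 * on every port at once via RTE_ETH_ALL. Event callbacks run in the
 * EAL interrupt thread, so they must not block for long.
 *
 *	static int
 *	lsc_cb(uint16_t port_id, enum rte_eth_event_type event,
 *	       void *cb_arg, void *ret_param)
 *	{
 *		RTE_SET_USED(cb_arg);
 *		RTE_SET_USED(ret_param);
 *		printf("port %u: link event %d\n", port_id, event);
 *		return 0;
 *	}
 *
 *	rte_eth_dev_callback_register(RTE_ETH_ALL,
 *			RTE_ETH_EVENT_INTR_LSC, lsc_cb, NULL);
 */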
4646
4647 int
4648 rte_eth_dev_callback_unregister(uint16_t port_id,
4649                         enum rte_eth_event_type event,
4650                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4651 {
4652         int ret;
4653         struct rte_eth_dev *dev;
4654         struct rte_eth_dev_callback *cb, *next;
4655         uint16_t next_port;
4656         uint16_t last_port;
4657
4658         if (cb_fn == NULL) {
4659                 RTE_ETHDEV_LOG(ERR,
4660                         "Cannot unregister ethdev port %u callback from NULL\n",
4661                         port_id);
4662                 return -EINVAL;
4663         }
4664
4665         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4666                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id);
4667                 return -EINVAL;
4668         }
4669
4670         if (port_id == RTE_ETH_ALL) {
4671                 next_port = 0;
4672                 last_port = RTE_MAX_ETHPORTS - 1;
4673         } else {
4674                 next_port = last_port = port_id;
4675         }
4676
4677         rte_spinlock_lock(&eth_dev_cb_lock);
4678
4679         ret = 0; /* preserve -EAGAIN across ports when port_id == RTE_ETH_ALL */
4680         do {
4681                 dev = &rte_eth_devices[next_port];
4682                 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
4683                      cb = next) {
4684
4685                         next = TAILQ_NEXT(cb, next);
4686
4687                         if (cb->cb_fn != cb_fn || cb->event != event ||
4688                             (cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
4689                                 continue;
4690
4691                         /*
4692                          * if this callback is not executing right now,
4693                          * then remove it.
4694                          */
4695                         if (cb->active == 0) {
4696                                 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
4697                                 rte_free(cb);
4698                         } else {
4699                                 ret = -EAGAIN;
4700                         }
4701                 }
4702         } while (++next_port <= last_port);
4703
4704         rte_spinlock_unlock(&eth_dev_cb_lock);
4705         return ret;
4706 }
4707
4708 int
4709 rte_eth_dev_callback_process(struct rte_eth_dev *dev,
4710         enum rte_eth_event_type event, void *ret_param)
4711 {
4712         struct rte_eth_dev_callback *cb_lst;
4713         struct rte_eth_dev_callback dev_cb;
4714         int rc = 0;
4715
4716         rte_spinlock_lock(&eth_dev_cb_lock);
4717         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
4718                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
4719                         continue;
4720                 dev_cb = *cb_lst;
4721                 cb_lst->active = 1;
4722                 if (ret_param != NULL)
4723                         dev_cb.ret_param = ret_param;
4724
4725                 rte_spinlock_unlock(&eth_dev_cb_lock);
4726                 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
4727                                 dev_cb.cb_arg, dev_cb.ret_param);
4728                 rte_spinlock_lock(&eth_dev_cb_lock);
4729                 cb_lst->active = 0;
4730         }
4731         rte_spinlock_unlock(&eth_dev_cb_lock);
4732         return rc;
4733 }
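
/*
 * Driver-side sketch (illustrative only): a PMD interrupt handler
 * typically refreshes its link state, e.g. via rte_eth_linkstatus_set(),
 * and then fans the event out to the callbacks registered above:
 *
 *	rte_eth_linkstatus_set(dev, &new_link);
 *	rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
 */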
4734
4735 void
4736 rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
4737 {
4738         if (dev == NULL)
4739                 return;
4740
4741         rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);
4742
4743         dev->state = RTE_ETH_DEV_ATTACHED;
4744 }
4745
4746 int
4747 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
4748 {
4749         uint32_t vec;
4750         struct rte_eth_dev *dev;
4751         struct rte_intr_handle *intr_handle;
4752         uint16_t qid;
4753         int rc;
4754
4755         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4756         dev = &rte_eth_devices[port_id];
4757
4758         if (!dev->intr_handle) {
4759                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4760                 return -ENOTSUP;
4761         }
4762
4763         intr_handle = dev->intr_handle;
4764         if (!intr_handle->intr_vec) {
4765                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4766                 return -EPERM;
4767         }
4768
4769         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
4770                 vec = intr_handle->intr_vec[qid];
4771                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4772                 if (rc && rc != -EEXIST) {
4773                         RTE_ETHDEV_LOG(ERR,
4774                                 "p %u q %u rx ctl error op %d epfd %d vec %u\n",
4775                                 port_id, qid, op, epfd, vec);
4776                 }
4777         }
4778
4779         return 0;
4780 }
4781
4782 int
4783 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
4784 {
4785         struct rte_intr_handle *intr_handle;
4786         struct rte_eth_dev *dev;
4787         unsigned int efd_idx;
4788         uint32_t vec;
4789         int fd;
4790
4791         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
4792         dev = &rte_eth_devices[port_id];
4793
4794         if (queue_id >= dev->data->nb_rx_queues) {
4795                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4796                 return -1;
4797         }
4798
4799         if (!dev->intr_handle) {
4800                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4801                 return -1;
4802         }
4803
4804         intr_handle = dev->intr_handle;
4805         if (!intr_handle->intr_vec) {
4806                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4807                 return -1;
4808         }
4809
4810         vec = intr_handle->intr_vec[queue_id];
4811         efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
4812                 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
4813         fd = intr_handle->efds[efd_idx];
4814
4815         return fd;
4816 }
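
/*
 * Usage sketch (illustrative only, Linux-specific): feeding the
 * per-queue interrupt fd into an application-owned epoll(7) set
 * instead of the EAL-managed one. Error handling omitted.
 *
 *	int efd = epoll_create1(0);
 *	int qfd = rte_eth_dev_rx_intr_ctl_q_get_fd(port_id, queue_id);
 *	struct epoll_event ev = { .events = EPOLLIN };
 *
 *	if (qfd >= 0)
 *		epoll_ctl(efd, EPOLL_CTL_ADD, qfd, &ev);
 */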
4817
4818 static inline int
4819 eth_dev_dma_mzone_name(char *name, size_t len, uint16_t port_id, uint16_t queue_id,
4820                 const char *ring_name)
4821 {
4822         return snprintf(name, len, "eth_p%d_q%d_%s",
4823                         port_id, queue_id, ring_name);
4824 }
4825
4826 const struct rte_memzone *
4827 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
4828                          uint16_t queue_id, size_t size, unsigned align,
4829                          int socket_id)
4830 {
4831         char z_name[RTE_MEMZONE_NAMESIZE];
4832         const struct rte_memzone *mz;
4833         int rc;
4834
4835         rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
4836                         queue_id, ring_name);
4837         if (rc >= RTE_MEMZONE_NAMESIZE) {
4838                 RTE_ETHDEV_LOG(ERR, "ring name too long\n");
4839                 rte_errno = ENAMETOOLONG;
4840                 return NULL;
4841         }
4842
4843         mz = rte_memzone_lookup(z_name);
4844         if (mz) {
4845                 if ((socket_id != SOCKET_ID_ANY && socket_id != mz->socket_id) ||
4846                                 size > mz->len ||
4847                                 ((uintptr_t)mz->addr & (align - 1)) != 0) {
4848                         RTE_ETHDEV_LOG(ERR,
4849                                 "memzone %s does not satisfy the requested attributes\n",
4850                                 mz->name);
4851                         return NULL;
4852                 }
4853
4854                 return mz;
4855         }
4856
4857         return rte_memzone_reserve_aligned(z_name, size, socket_id,
4858                         RTE_MEMZONE_IOVA_CONTIG, align);
4859 }
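
/*
 * Driver-side sketch (illustrative only): a PMD reserving the memzone
 * that backs a descriptor ring. "tx_ring" and the size/alignment are
 * hypothetical. Thanks to the lookup above, a stop/reconfigure/start
 * cycle reuses the existing zone as long as the attributes still fit.
 *
 *	const struct rte_memzone *mz;
 *
 *	mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_id,
 *			ring_size, 4096, socket_id);
 *	if (mz == NULL)
 *		return -rte_errno;
 */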
4860
4861 int
4862 rte_eth_dma_zone_free(const struct rte_eth_dev *dev, const char *ring_name,
4863                 uint16_t queue_id)
4864 {
4865         char z_name[RTE_MEMZONE_NAMESIZE];
4866         const struct rte_memzone *mz;
4867         int rc = 0;
4868
4869         rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
4870                         queue_id, ring_name);
4871         if (rc >= RTE_MEMZONE_NAMESIZE) {
4872                 RTE_ETHDEV_LOG(ERR, "ring name too long\n");
4873                 return -ENAMETOOLONG;
4874         }
4875
4876         mz = rte_memzone_lookup(z_name);
4877         if (mz)
4878                 rc = rte_memzone_free(mz);
4879         else
4880                 rc = -ENOENT;
4881
4882         return rc;
4883 }
4884
4885 int
4886 rte_eth_dev_create(struct rte_device *device, const char *name,
4887         size_t priv_data_size,
4888         ethdev_bus_specific_init ethdev_bus_specific_init,
4889         void *bus_init_params,
4890         ethdev_init_t ethdev_init, void *init_params)
4891 {
4892         struct rte_eth_dev *ethdev;
4893         int retval;
4894
4895         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL);
4896
4897         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
4898                 ethdev = rte_eth_dev_allocate(name);
4899                 if (!ethdev)
4900                         return -ENODEV;
4901
4902                 if (priv_data_size) {
4903                         ethdev->data->dev_private = rte_zmalloc_socket(
4904                                 name, priv_data_size, RTE_CACHE_LINE_SIZE,
4905                                 device->numa_node);
4906
4907                         if (!ethdev->data->dev_private) {
4908                                 RTE_ETHDEV_LOG(ERR,
4909                                         "failed to allocate private data\n");
4910                                 retval = -ENOMEM;
4911                                 goto probe_failed;
4912                         }
4913                 }
4914         } else {
4915                 ethdev = rte_eth_dev_attach_secondary(name);
4916                 if (!ethdev) {
4917                         RTE_ETHDEV_LOG(ERR,
4918                                 "secondary process attach failed, ethdev doesn't exist\n");
4919                         return -ENODEV;
4920                 }
4921         }
4922
4923         ethdev->device = device;
4924
4925         if (ethdev_bus_specific_init) {
4926                 retval = ethdev_bus_specific_init(ethdev, bus_init_params);
4927                 if (retval) {
4928                         RTE_ETHDEV_LOG(ERR,
4929                                 "ethdev bus specific initialisation failed\n");
4930                         goto probe_failed;
4931                 }
4932         }
4933
4934         retval = ethdev_init(ethdev, init_params);
4935         if (retval) {
4936                 RTE_ETHDEV_LOG(ERR, "ethdev initialisation failed\n");
4937                 goto probe_failed;
4938         }
4939
4940         rte_eth_dev_probing_finish(ethdev);
4941
4942         return retval;
4943
4944 probe_failed:
4945         rte_eth_dev_release_port(ethdev);
4946         return retval;
4947 }
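
/*
 * Driver-side sketch (illustrative only): a bus probe callback built
 * on rte_eth_dev_create(). foo_pci_probe, struct foo_private and
 * eth_foo_init are hypothetical names; no bus-specific init is used.
 *
 *	static int
 *	foo_pci_probe(struct rte_pci_driver *drv, struct rte_pci_device *pdev)
 *	{
 *		RTE_SET_USED(drv);
 *		return rte_eth_dev_create(&pdev->device, pdev->device.name,
 *				sizeof(struct foo_private), NULL, NULL,
 *				eth_foo_init, NULL);
 *	}
 */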
4948
4949 int
4950 rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
4951         ethdev_uninit_t ethdev_uninit)
4952 {
4953         int ret;
4954
4955         ethdev = rte_eth_dev_allocated(ethdev->data->name);
4956         if (!ethdev)
4957                 return -ENODEV;
4958
4959         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL);
4960
4961         ret = ethdev_uninit(ethdev);
4962         if (ret)
4963                 return ret;
4964
4965         return rte_eth_dev_release_port(ethdev);
4966 }
4967
4968 int
4969 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
4970                           int epfd, int op, void *data)
4971 {
4972         uint32_t vec;
4973         struct rte_eth_dev *dev;
4974         struct rte_intr_handle *intr_handle;
4975         int rc;
4976
4977         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4978         dev = &rte_eth_devices[port_id];
4979
4980         if (queue_id >= dev->data->nb_rx_queues) {
4981                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4982                 return -EINVAL;
4983         }
4984
4985         if (!dev->intr_handle) {
4986                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4987                 return -ENOTSUP;
4988         }
4989
4990         intr_handle = dev->intr_handle;
4991         if (!intr_handle->intr_vec) {
4992                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4993                 return -EPERM;
4994         }
4995
4996         vec = intr_handle->intr_vec[queue_id];
4997         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4998         if (rc && rc != -EEXIST) {
4999                 RTE_ETHDEV_LOG(ERR,
5000                         "p %u q %u rx ctl error op %d epfd %d vec %u\n",
5001                         port_id, queue_id, op, epfd, vec);
5002                 return rc;
5003         }
5004
5005         return 0;
5006 }
5007
5008 int
5009 rte_eth_dev_rx_intr_enable(uint16_t port_id,
5010                            uint16_t queue_id)
5011 {
5012         struct rte_eth_dev *dev;
5013         int ret;
5014
5015         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5016         dev = &rte_eth_devices[port_id];
5017
5018         ret = eth_dev_validate_rx_queue(dev, queue_id);
5019         if (ret != 0)
5020                 return ret;
5021
5022         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
5023         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id));
5024 }
5025
5026 int
5027 rte_eth_dev_rx_intr_disable(uint16_t port_id,
5028                             uint16_t queue_id)
5029 {
5030         struct rte_eth_dev *dev;
5031         int ret;
5032
5033         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5034         dev = &rte_eth_devices[port_id];
5035
5036         ret = eth_dev_validate_rx_queue(dev, queue_id);
5037         if (ret != 0)
5038                 return ret;
5039
5040         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
5041         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id));
5042 }
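
/*
 * Usage sketch (illustrative only): interrupt-driven Rx in the style
 * of the l3fwd-power example. The queue event is mapped onto the
 * calling thread's epoll fd, then the lcore sleeps until a packet
 * arrives or the 10 ms timeout expires.
 *
 *	struct rte_epoll_event ev;
 *
 *	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
 *			RTE_INTR_EVENT_ADD, NULL);
 *	rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, 10);
 *	rte_eth_dev_rx_intr_disable(port_id, queue_id);
 */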
5043
5044
5045 const struct rte_eth_rxtx_callback *
5046 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
5047                 rte_rx_callback_fn fn, void *user_param)
5048 {
5049 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5050         rte_errno = ENOTSUP;
5051         return NULL;
5052 #endif
5053         struct rte_eth_dev *dev;
5054
5055         /* check input parameters */
5056         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
5057                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
5058                 rte_errno = EINVAL;
5059                 return NULL;
5060         }
5061         dev = &rte_eth_devices[port_id];
5062         if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
5063                 rte_errno = EINVAL;
5064                 return NULL;
5065         }
5066         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
5067
5068         if (cb == NULL) {
5069                 rte_errno = ENOMEM;
5070                 return NULL;
5071         }
5072
5073         cb->fn.rx = fn;
5074         cb->param = user_param;
5075
5076         rte_spinlock_lock(&eth_dev_rx_cb_lock);
5077         /* Add the callbacks in fifo order. */
5078         struct rte_eth_rxtx_callback *tail =
5079                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
5080
5081         if (!tail) {
5082                 /* Stores to cb->fn and cb->param should complete before
5083                  * cb is visible to data plane.
5084                  */
5085                 __atomic_store_n(
5086                         &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
5087                         cb, __ATOMIC_RELEASE);
5088
5089         } else {
5090                 while (tail->next)
5091                         tail = tail->next;
5092                 /* Stores to cb->fn and cb->param should complete before
5093                  * cb is visible to data plane.
5094                  */
5095                 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
5096         }
5097         rte_spinlock_unlock(&eth_dev_rx_cb_lock);
5098
5099         return cb;
5100 }
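
/*
 * Usage sketch (illustrative only): a post-Rx callback counting
 * received packets. Keep the returned handle so the callback can be
 * removed later with rte_eth_remove_rx_callback().
 *
 *	static uint64_t rx_count;
 *
 *	static uint16_t
 *	count_cb(uint16_t port, uint16_t queue, struct rte_mbuf *pkts[],
 *		 uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
 *	{
 *		RTE_SET_USED(port); RTE_SET_USED(queue);
 *		RTE_SET_USED(pkts); RTE_SET_USED(max_pkts);
 *		RTE_SET_USED(user_param);
 *		rx_count += nb_pkts;
 *		return nb_pkts;
 *	}
 *
 *	const struct rte_eth_rxtx_callback *cb =
 *		rte_eth_add_rx_callback(port_id, 0, count_cb, NULL);
 */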
5101
5102 const struct rte_eth_rxtx_callback *
5103 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
5104                 rte_rx_callback_fn fn, void *user_param)
5105 {
5106 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5107         rte_errno = ENOTSUP;
5108         return NULL;
5109 #endif
5110         /* check input parameters */
5111         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
5112                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
5113                 rte_errno = EINVAL;
5114                 return NULL;
5115         }
5116
5117         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
5118
5119         if (cb == NULL) {
5120                 rte_errno = ENOMEM;
5121                 return NULL;
5122         }
5123
5124         cb->fn.rx = fn;
5125         cb->param = user_param;
5126
5127         rte_spinlock_lock(&eth_dev_rx_cb_lock);
5128         /* Add the callbacks at first position */
5129         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
5130         /* Stores to cb->fn, cb->param and cb->next should complete before
5131          * cb is visible to data plane threads.
5132          */
5133         __atomic_store_n(
5134                 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
5135                 cb, __ATOMIC_RELEASE);
5136         rte_spinlock_unlock(&eth_dev_rx_cb_lock);
5137
5138         return cb;
5139 }
5140
5141 const struct rte_eth_rxtx_callback *
5142 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
5143                 rte_tx_callback_fn fn, void *user_param)
5144 {
5145 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5146         rte_errno = ENOTSUP;
5147         return NULL;
5148 #endif
5149         struct rte_eth_dev *dev;
5150
5151         /* check input parameters */
5152         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
5153                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
5154                 rte_errno = EINVAL;
5155                 return NULL;
5156         }
5157
5158         dev = &rte_eth_devices[port_id];
5159         if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
5160                 rte_errno = EINVAL;
5161                 return NULL;
5162         }
5163
5164         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
5165
5166         if (cb == NULL) {
5167                 rte_errno = ENOMEM;
5168                 return NULL;
5169         }
5170
5171         cb->fn.tx = fn;
5172         cb->param = user_param;
5173
5174         rte_spinlock_lock(&eth_dev_tx_cb_lock);
5175         /* Add the callbacks in fifo order. */
5176         struct rte_eth_rxtx_callback *tail =
5177                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
5178
5179         if (!tail) {
5180                 /* Stores to cb->fn and cb->param should complete before
5181                  * cb is visible to data plane.
5182                  */
5183                 __atomic_store_n(
5184                         &rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id],
5185                         cb, __ATOMIC_RELEASE);
5186
5187         } else {
5188                 while (tail->next)
5189                         tail = tail->next;
5190                 /* Stores to cb->fn and cb->param should complete before
5191                  * cb is visible to data plane.
5192                  */
5193                 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
5194         }
5195         rte_spinlock_unlock(&eth_dev_tx_cb_lock);
5196
5197         return cb;
5198 }
5199
5200 int
5201 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
5202                 const struct rte_eth_rxtx_callback *user_cb)
5203 {
5204 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5205         return -ENOTSUP;
5206 #endif
5207         /* Check input parameters. */
5208         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5209         if (user_cb == NULL ||
5210                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
5211                 return -EINVAL;
5212
5213         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
5214         struct rte_eth_rxtx_callback *cb;
5215         struct rte_eth_rxtx_callback **prev_cb;
5216         int ret = -EINVAL;
5217
5218         rte_spinlock_lock(&eth_dev_rx_cb_lock);
5219         prev_cb = &dev->post_rx_burst_cbs[queue_id];
5220         for (; *prev_cb != NULL; prev_cb = &cb->next) {
5221                 cb = *prev_cb;
5222                 if (cb == user_cb) {
5223                         /* Remove the user cb from the callback list. */
5224                         __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
5225                         ret = 0;
5226                         break;
5227                 }
5228         }
5229         rte_spinlock_unlock(&eth_dev_rx_cb_lock);
5230
5231         return ret;
5232 }
5233
5234 int
5235 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
5236                 const struct rte_eth_rxtx_callback *user_cb)
5237 {
5238 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5239         return -ENOTSUP;
5240 #endif
5241         /* Check input parameters. */
5242         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5243         if (user_cb == NULL ||
5244                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
5245                 return -EINVAL;
5246
5247         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
5248         int ret = -EINVAL;
5249         struct rte_eth_rxtx_callback *cb;
5250         struct rte_eth_rxtx_callback **prev_cb;
5251
5252         rte_spinlock_lock(&eth_dev_tx_cb_lock);
5253         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
5254         for (; *prev_cb != NULL; prev_cb = &cb->next) {
5255                 cb = *prev_cb;
5256                 if (cb == user_cb) {
5257                         /* Remove the user cb from the callback list. */
5258                         __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
5259                         ret = 0;
5260                         break;
5261                 }
5262         }
5263         rte_spinlock_unlock(&eth_dev_tx_cb_lock);
5264
5265         return ret;
5266 }
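
/*
 * Note on both removal helpers above: a zero return only unlinks the
 * callback; the memory is deliberately not freed here because a
 * data-plane thread may still be executing it. The application must
 * make sure no lcore is inside the callback (e.g. by stopping the
 * ports or waiting out a quiescent period) before freeing the handle.
 */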
5267
5268 int
5269 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5270         struct rte_eth_rxq_info *qinfo)
5271 {
5272         struct rte_eth_dev *dev;
5273
5274         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5275         dev = &rte_eth_devices[port_id];
5276
5277         if (queue_id >= dev->data->nb_rx_queues) {
5278                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
5279                 return -EINVAL;
5280         }
5281
5282         if (qinfo == NULL) {
5283                 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Rx queue %u info to NULL\n",
5284                         port_id, queue_id);
5285                 return -EINVAL;
5286         }
5287
5288         if (dev->data->rx_queues == NULL ||
5289                         dev->data->rx_queues[queue_id] == NULL) {
5290                 RTE_ETHDEV_LOG(ERR,
5291                                "Rx queue %"PRIu16" of device with port_id=%"
5292                                PRIu16" has not been setup\n",
5293                                queue_id, port_id);
5294                 return -EINVAL;
5295         }
5296
5297         if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
5298                 RTE_ETHDEV_LOG(INFO,
5299                         "Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
5300                         queue_id, port_id);
5301                 return -EINVAL;
5302         }
5303
5304         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
5305
5306         memset(qinfo, 0, sizeof(*qinfo));
5307         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
5308         qinfo->queue_state = dev->data->rx_queue_state[queue_id];
5309
5310         return 0;
5311 }
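
/*
 * Usage sketch (illustrative only): inspecting a configured Rx queue.
 *
 *	struct rte_eth_rxq_info qinfo;
 *
 *	if (rte_eth_rx_queue_info_get(port_id, 0, &qinfo) == 0)
 *		printf("rxq 0: %u descriptors, scattered_rx=%u\n",
 *				qinfo.nb_desc, qinfo.scattered_rx);
 */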
5312
5313 int
5314 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5315         struct rte_eth_txq_info *qinfo)
5316 {
5317         struct rte_eth_dev *dev;
5318
5319         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5320         dev = &rte_eth_devices[port_id];
5321
5322         if (queue_id >= dev->data->nb_tx_queues) {
5323                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
5324                 return -EINVAL;
5325         }
5326
5327         if (qinfo == NULL) {
5328                 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Tx queue %u info to NULL\n",
5329                         port_id, queue_id);
5330                 return -EINVAL;
5331         }
5332
5333         if (dev->data->tx_queues == NULL ||
5334                         dev->data->tx_queues[queue_id] == NULL) {
5335                 RTE_ETHDEV_LOG(ERR,
5336                                "Tx queue %"PRIu16" of device with port_id=%"
5337                                PRIu16" has not been setup\n",
5338                                queue_id, port_id);
5339                 return -EINVAL;
5340         }
5341
5342         if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
5343                 RTE_ETHDEV_LOG(INFO,
5344                         "Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
5345                         queue_id, port_id);
5346                 return -EINVAL;
5347         }
5348
5349         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
5350
5351         memset(qinfo, 0, sizeof(*qinfo));
5352         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
5353         qinfo->queue_state = dev->data->tx_queue_state[queue_id];
5354
5355         return 0;
5356 }
5357
5358 int
5359 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5360                           struct rte_eth_burst_mode *mode)
5361 {
5362         struct rte_eth_dev *dev;
5363
5364         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5365         dev = &rte_eth_devices[port_id];
5366
5367         if (queue_id >= dev->data->nb_rx_queues) {
5368                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
5369                 return -EINVAL;
5370         }
5371
5372         if (mode == NULL) {
5373                 RTE_ETHDEV_LOG(ERR,
5374                         "Cannot get ethdev port %u Rx queue %u burst mode to NULL\n",
5375                         port_id, queue_id);
5376                 return -EINVAL;
5377         }
5378
5379         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP);
5380         memset(mode, 0, sizeof(*mode));
5381         return eth_err(port_id,
5382                        dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode));
5383 }
5384
5385 int
5386 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5387                           struct rte_eth_burst_mode *mode)
5388 {
5389         struct rte_eth_dev *dev;
5390
5391         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5392         dev = &rte_eth_devices[port_id];
5393
5394         if (queue_id >= dev->data->nb_tx_queues) {
5395                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
5396                 return -EINVAL;
5397         }
5398
5399         if (mode == NULL) {
5400                 RTE_ETHDEV_LOG(ERR,
5401                         "Cannot get ethdev port %u Tx queue %u burst mode to NULL\n",
5402                         port_id, queue_id);
5403                 return -EINVAL;
5404         }
5405
5406         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP);
5407         memset(mode, 0, sizeof(*mode));
5408         return eth_err(port_id,
5409                        dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode));
5410 }
5411
5412 int
5413 rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id,
5414                 struct rte_power_monitor_cond *pmc)
5415 {
5416         struct rte_eth_dev *dev;
5417
5418         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5419         dev = &rte_eth_devices[port_id];
5420
5421         if (queue_id >= dev->data->nb_rx_queues) {
5422                 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
5423                 return -EINVAL;
5424         }
5425
5426         if (pmc == NULL) {
5427                 RTE_ETHDEV_LOG(ERR,
5428                         "Cannot get ethdev port %u Rx queue %u power monitor condition to NULL\n",
5429                         port_id, queue_id);
5430                 return -EINVAL;
5431         }
5432
5433         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_monitor_addr, -ENOTSUP);
5434         return eth_err(port_id,
5435                 dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id], pmc));
5436 }
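
/*
 * Usage sketch (illustrative only): combining the queried condition
 * with the power-management intrinsics to stall the core until the
 * NIC writes the next descriptor. Requires hardware UMWAIT-style
 * support; deadline_cycles is a hypothetical TSC deadline.
 *
 *	struct rte_power_monitor_cond pmc;
 *
 *	if (rte_eth_get_monitor_addr(port_id, 0, &pmc) == 0)
 *		rte_power_monitor(&pmc, rte_rdtsc() + deadline_cycles);
 */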
5437
5438 int
5439 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
5440                              struct rte_ether_addr *mc_addr_set,
5441                              uint32_t nb_mc_addr)
5442 {
5443         struct rte_eth_dev *dev;
5444
5445         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5446         dev = &rte_eth_devices[port_id];
5447
5448         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
5449         return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
5450                                                 mc_addr_set, nb_mc_addr));
5451 }
5452
5453 int
5454 rte_eth_timesync_enable(uint16_t port_id)
5455 {
5456         struct rte_eth_dev *dev;
5457
5458         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5459         dev = &rte_eth_devices[port_id];
5460
5461         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
5462         return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
5463 }
5464
5465 int
5466 rte_eth_timesync_disable(uint16_t port_id)
5467 {
5468         struct rte_eth_dev *dev;
5469
5470         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5471         dev = &rte_eth_devices[port_id];
5472
5473         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
5474         return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
5475 }
5476
5477 int
5478 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
5479                                    uint32_t flags)
5480 {
5481         struct rte_eth_dev *dev;
5482
5483         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5484         dev = &rte_eth_devices[port_id];
5485
5486         if (timestamp == NULL) {
5487                 RTE_ETHDEV_LOG(ERR,
5488                         "Cannot read ethdev port %u Rx timestamp to NULL\n",
5489                         port_id);
5490                 return -EINVAL;
5491         }
5492
5493         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
5494         return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
5495                                 (dev, timestamp, flags));
5496 }
5497
5498 int
5499 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
5500                                    struct timespec *timestamp)
5501 {
5502         struct rte_eth_dev *dev;
5503
5504         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5505         dev = &rte_eth_devices[port_id];
5506
5507         if (timestamp == NULL) {
5508                 RTE_ETHDEV_LOG(ERR,
5509                         "Cannot read ethdev port %u Tx timestamp to NULL\n",
5510                         port_id);
5511                 return -EINVAL;
5512         }
5513
5514         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
5515         return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
5516                                 (dev, timestamp));
5517 }
5518
5519 int
5520 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
5521 {
5522         struct rte_eth_dev *dev;
5523
5524         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5525         dev = &rte_eth_devices[port_id];
5526
5527         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
5528         return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, delta));
5529 }
5530
5531 int
5532 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
5533 {
5534         struct rte_eth_dev *dev;
5535
5536         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5537         dev = &rte_eth_devices[port_id];
5538
5539         if (timestamp == NULL) {
5540                 RTE_ETHDEV_LOG(ERR,
5541                         "Cannot read ethdev port %u timesync time to NULL\n",
5542                         port_id);
5543                 return -EINVAL;
5544         }
5545
5546         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
5547         return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
5548                                                                 timestamp));
5549 }
5550
5551 int
5552 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
5553 {
5554         struct rte_eth_dev *dev;
5555
5556         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5557         dev = &rte_eth_devices[port_id];
5558
5559         if (timestamp == NULL) {
5560                 RTE_ETHDEV_LOG(ERR,
5561                         "Cannot write ethdev port %u timesync from NULL time\n",
5562                         port_id);
5563                 return -EINVAL;
5564         }
5565
5566         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
5567         return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
5568                                                                 timestamp));
5569 }
5570
5571 int
5572 rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
5573 {
5574         struct rte_eth_dev *dev;
5575
5576         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5577         dev = &rte_eth_devices[port_id];
5578
5579         if (clock == NULL) {
5580                 RTE_ETHDEV_LOG(ERR, "Cannot read ethdev port %u clock to NULL\n",
5581                         port_id);
5582                 return -EINVAL;
5583         }
5584
5585         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP);
5586         return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock));
5587 }
5588
5589 int
5590 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
5591 {
5592         struct rte_eth_dev *dev;
5593
5594         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5595         dev = &rte_eth_devices[port_id];
5596
5597         if (info == NULL) {
5598                 RTE_ETHDEV_LOG(ERR,
5599                         "Cannot get ethdev port %u register info to NULL\n",
5600                         port_id);
5601                 return -EINVAL;
5602         }
5603
5604         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
5605         return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
5606 }
5607
5608 int
5609 rte_eth_dev_get_eeprom_length(uint16_t port_id)
5610 {
5611         struct rte_eth_dev *dev;
5612
5613         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5614         dev = &rte_eth_devices[port_id];
5615
5616         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
5617         return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
5618 }
5619
5620 int
5621 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
5622 {
5623         struct rte_eth_dev *dev;
5624
5625         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5626         dev = &rte_eth_devices[port_id];
5627
5628         if (info == NULL) {
5629                 RTE_ETHDEV_LOG(ERR,
5630                         "Cannot get ethdev port %u EEPROM info to NULL\n",
5631                         port_id);
5632                 return -EINVAL;
5633         }
5634
5635         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
5636         return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
5637 }
5638
5639 int
5640 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
5641 {
5642         struct rte_eth_dev *dev;
5643
5644         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5645         dev = &rte_eth_devices[port_id];
5646
5647         if (info == NULL) {
5648                 RTE_ETHDEV_LOG(ERR,
5649                         "Cannot set ethdev port %u EEPROM from NULL info\n",
5650                         port_id);
5651                 return -EINVAL;
5652         }
5653
5654         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
5655         return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
5656 }
5657
5658 int
5659 rte_eth_dev_get_module_info(uint16_t port_id,
5660                             struct rte_eth_dev_module_info *modinfo)
5661 {
5662         struct rte_eth_dev *dev;
5663
5664         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5665         dev = &rte_eth_devices[port_id];
5666
5667         if (modinfo == NULL) {
5668                 RTE_ETHDEV_LOG(ERR,
5669                         "Cannot get ethdev port %u EEPROM module info to NULL\n",
5670                         port_id);
5671                 return -EINVAL;
5672         }
5673
5674         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP);
5675         return (*dev->dev_ops->get_module_info)(dev, modinfo);
5676 }
5677
5678 int
5679 rte_eth_dev_get_module_eeprom(uint16_t port_id,
5680                               struct rte_dev_eeprom_info *info)
5681 {
5682         struct rte_eth_dev *dev;
5683
5684         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5685         dev = &rte_eth_devices[port_id];
5686
5687         if (info == NULL) {
5688                 RTE_ETHDEV_LOG(ERR,
5689                         "Cannot get ethdev port %u module EEPROM info to NULL\n",
5690                         port_id);
5691                 return -EINVAL;
5692         }
5693
5694         if (info->data == NULL) {
5695                 RTE_ETHDEV_LOG(ERR,
5696                         "Cannot get ethdev port %u module EEPROM data to NULL\n",
5697                         port_id);
5698                 return -EINVAL;
5699         }
5700
5701         if (info->length == 0) {
5702                 RTE_ETHDEV_LOG(ERR,
5703                         "Cannot get ethdev port %u module EEPROM data with zero size\n",
5704                         port_id);
5705                 return -EINVAL;
5706         }
5707
5708         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP);
5709         return (*dev->dev_ops->get_module_eeprom)(dev, info);
5710 }
5711
5712 int
5713 rte_eth_dev_get_dcb_info(uint16_t port_id,
5714                              struct rte_eth_dcb_info *dcb_info)
5715 {
5716         struct rte_eth_dev *dev;
5717
5718         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5719         dev = &rte_eth_devices[port_id];
5720
5721         if (dcb_info == NULL) {
5722                 RTE_ETHDEV_LOG(ERR,
5723                         "Cannot get ethdev port %u DCB info to NULL\n",
5724                         port_id);
5725                 return -EINVAL;
5726         }
5727
5728         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
5729
5730         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
5731         return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
5732 }
5733
5734 static void
5735 eth_dev_adjust_nb_desc(uint16_t *nb_desc,
5736                 const struct rte_eth_desc_lim *desc_lim)
5737 {
5738         if (desc_lim->nb_align != 0)
5739                 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
5740
5741         if (desc_lim->nb_max != 0)
5742                 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
5743
5744         *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
5745 }
5746
5747 int
5748 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
5749                                  uint16_t *nb_rx_desc,
5750                                  uint16_t *nb_tx_desc)
5751 {
5752         struct rte_eth_dev_info dev_info;
5753         int ret;
5754
5755         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5756
5757         ret = rte_eth_dev_info_get(port_id, &dev_info);
5758         if (ret != 0)
5759                 return ret;
5760
5761         if (nb_rx_desc != NULL)
5762                 eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
5763
5764         if (nb_tx_desc != NULL)
5765                 eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
5766
5767         return 0;
5768 }
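
/*
 * Usage sketch (illustrative only): clamping requested ring sizes to
 * the device limits before queue setup; mbuf_pool is assumed to exist.
 *
 *	uint16_t nb_rxd = 1024, nb_txd = 1024;
 *
 *	rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
 *	rte_eth_rx_queue_setup(port_id, 0, nb_rxd, rte_socket_id(),
 *			NULL, mbuf_pool);
 *	rte_eth_tx_queue_setup(port_id, 0, nb_txd, rte_socket_id(), NULL);
 */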
5769
5770 int
5771 rte_eth_dev_hairpin_capability_get(uint16_t port_id,
5772                                    struct rte_eth_hairpin_cap *cap)
5773 {
5774         struct rte_eth_dev *dev;
5775
5776         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5777         dev = &rte_eth_devices[port_id];
5778
5779         if (cap == NULL) {
5780                 RTE_ETHDEV_LOG(ERR,
5781                         "Cannot get ethdev port %u hairpin capability to NULL\n",
5782                         port_id);
5783                 return -EINVAL;
5784         }
5785
5786         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP);
5787         memset(cap, 0, sizeof(*cap));
5788         return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap));
5789 }
5790
5791 int
5792 rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
5793 {
5794         if (dev->data->rx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN)
5795                 return 1;
5796         return 0;
5797 }
5798
5799 int
5800 rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
5801 {
5802         if (dev->data->tx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN)
5803                 return 1;
5804         return 0;
5805 }
5806
5807 int
5808 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
5809 {
5810         struct rte_eth_dev *dev;
5811
5812         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5813         dev = &rte_eth_devices[port_id];
5814
5815         if (pool == NULL) {
5816                 RTE_ETHDEV_LOG(ERR,
5817                         "Cannot test ethdev port %u mempool operation from NULL pool\n",
5818                         port_id);
5819                 return -EINVAL;
5820         }
5821
5822         if (*dev->dev_ops->pool_ops_supported == NULL)
5823                 return 1; /* all pools are supported */
5824
5825         return (*dev->dev_ops->pool_ops_supported)(dev, pool);
5826 }
5827
5828 /**
5829  * A set of values to describe the possible states of a switch domain.
5830  */
5831 enum rte_eth_switch_domain_state {
5832         RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
5833         RTE_ETH_SWITCH_DOMAIN_ALLOCATED
5834 };
5835
5836 /**
5837  * Array of switch domains available for allocation. Array is sized to
5838  * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
5839  * ethdev ports in a single process.
5840  */
5841 static struct rte_eth_dev_switch {
5842         enum rte_eth_switch_domain_state state;
5843 } eth_dev_switch_domains[RTE_MAX_ETHPORTS];
5844
5845 int
5846 rte_eth_switch_domain_alloc(uint16_t *domain_id)
5847 {
5848         uint16_t i;
5849
5850         *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
5851
5852         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
5853                 if (eth_dev_switch_domains[i].state ==
5854                         RTE_ETH_SWITCH_DOMAIN_UNUSED) {
5855                         eth_dev_switch_domains[i].state =
5856                                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
5857                         *domain_id = i;
5858                         return 0;
5859                 }
5860         }
5861
5862         return -ENOSPC;
5863 }
5864
5865 int
5866 rte_eth_switch_domain_free(uint16_t domain_id)
5867 {
5868         if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
5869                 domain_id >= RTE_MAX_ETHPORTS)
5870                 return -EINVAL;
5871
5872         if (eth_dev_switch_domains[domain_id].state !=
5873                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
5874                 return -EINVAL;
5875
5876         eth_dev_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;
5877
5878         return 0;
5879 }
5880
5881 static int
5882 eth_dev_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
5883 {
5884         int state;
5885         struct rte_kvargs_pair *pair;
5886         char *letter;
5887
5888         arglist->str = strdup(str_in);
5889         if (arglist->str == NULL)
5890                 return -ENOMEM;
5891
5892         letter = arglist->str;
5893         state = 0;
5894         arglist->count = 0;
5895         pair = &arglist->pairs[0];
5896         while (1) {
5897                 switch (state) {
5898                 case 0: /* Initial */
5899                         if (*letter == '=')
5900                                 return -EINVAL;
5901                         else if (*letter == '\0')
5902                                 return 0;
5903
5904                         state = 1;
5905                         pair->key = letter;
5906                         /* fall-thru */
5907
5908                 case 1: /* Parsing key */
5909                         if (*letter == '=') {
5910                                 *letter = '\0';
5911                                 pair->value = letter + 1;
5912                                 state = 2;
5913                         } else if (*letter == ',' || *letter == '\0')
5914                                 return -EINVAL;
5915                         break;
5916
5917
5918                 case 2: /* Parsing value */
5919                         if (*letter == '[')
5920                                 state = 3;
5921                         else if (*letter == ',') {
5922                                 *letter = '\0';
5923                                 arglist->count++;
5924                                 pair = &arglist->pairs[arglist->count];
5925                                 state = 0;
5926                         } else if (*letter == '\0') {
5927                                 letter--;
5928                                 arglist->count++;
5929                                 pair = &arglist->pairs[arglist->count];
5930                                 state = 0;
5931                         }
5932                         break;
5933
5934                 case 3: /* Parsing list */
5935                         if (*letter == ']')
5936                                 state = 2;
5937                         else if (*letter == '\0')
5938                                 return -EINVAL;
5939                         break;
5940                 }
5941                 letter++;
5942         }
5943 }
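
/*
 * Example of what eth_dev_devargs_tokenise() above produces: the input
 * "representor=[pf0vf0,pf1vf0],foo=bar" is split in place into two
 * key/value pairs, a bracketed list staying one value:
 *
 *     pairs[0] = { "representor", "[pf0vf0,pf1vf0]" }
 *     pairs[1] = { "foo", "bar" }
 *
 * A key with no '=', or an unterminated '[' list, yields -EINVAL.
 */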
5944
5945 int
5946 rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
5947 {
5948         struct rte_kvargs args;
5949         struct rte_kvargs_pair *pair;
5950         unsigned int i;
5951         int result = 0;
5952
5953         memset(eth_da, 0, sizeof(*eth_da));
5954
5955         result = eth_dev_devargs_tokenise(&args, dargs);
5956         if (result < 0)
5957                 goto parse_cleanup;
5958
5959         for (i = 0; i < args.count; i++) {
5960                 pair = &args.pairs[i];
5961                 if (strcmp("representor", pair->key) == 0) {
5962                         if (eth_da->type != RTE_ETH_REPRESENTOR_NONE) {
                                RTE_ETHDEV_LOG(ERR, "duplicated representor key: %s\n",
                                        dargs);
5965                                 result = -1;
5966                                 goto parse_cleanup;
5967                         }
5968                         result = rte_eth_devargs_parse_representor_ports(
5969                                         pair->value, eth_da);
5970                         if (result < 0)
5971                                 goto parse_cleanup;
5972                 }
5973         }
5974
5975 parse_cleanup:
5976         if (args.str)
5977                 free(args.str);
5978
5979         return result;
5980 }
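
/*
 * Usage sketch (illustrative): parsing a representor devargs string.
 * The field names below follow struct rte_eth_devargs.
 */
#if 0 /* usage sketch */
        struct rte_eth_devargs da;

        if (rte_eth_devargs_parse("representor=vf[0-3]", &da) == 0) {
                /* da.type == RTE_ETH_REPRESENTOR_VF
                 * da.nb_representor_ports == 4
                 * da.representor_ports[] == { 0, 1, 2, 3 }
                 */
        }
#endif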
5981
5982 int
5983 rte_eth_representor_id_get(const struct rte_eth_dev *ethdev,
5984                            enum rte_eth_representor_type type,
5985                            int controller, int pf, int representor_port,
5986                            uint16_t *repr_id)
5987 {
5988         int ret, n, i, count;
5989         struct rte_eth_representor_info *info = NULL;
5990         size_t size;
5991
5992         if (type == RTE_ETH_REPRESENTOR_NONE)
5993                 return 0;
5994         if (repr_id == NULL)
5995                 return -EINVAL;
5996
5997         /* Get PMD representor range info. */
5998         ret = rte_eth_representor_info_get(ethdev->data->port_id, NULL);
5999         if (ret == -ENOTSUP && type == RTE_ETH_REPRESENTOR_VF &&
6000             controller == -1 && pf == -1) {
6001                 /* Direct mapping for legacy VF representor. */
6002                 *repr_id = representor_port;
6003                 return 0;
6004         } else if (ret < 0) {
6005                 return ret;
6006         }
6007         n = ret;
6008         size = sizeof(*info) + n * sizeof(info->ranges[0]);
6009         info = calloc(1, size);
6010         if (info == NULL)
6011                 return -ENOMEM;
6012         ret = rte_eth_representor_info_get(ethdev->data->port_id, info);
6013         if (ret < 0)
6014                 goto out;
6015
6016         /* Default controller and pf to caller. */
6017         if (controller == -1)
6018                 controller = info->controller;
6019         if (pf == -1)
6020                 pf = info->pf;
6021
6022         /* Locate representor ID. */
6023         ret = -ENOENT;
6024         for (i = 0; i < n; ++i) {
6025                 if (info->ranges[i].type != type)
6026                         continue;
6027                 if (info->ranges[i].controller != controller)
6028                         continue;
                if (info->ranges[i].id_end < info->ranges[i].id_base) {
                        RTE_ETHDEV_LOG(WARNING,
                                "Port %u invalid representor ID range %u - %u, entry %d\n",
                                ethdev->data->port_id, info->ranges[i].id_base,
                                info->ranges[i].id_end, i);
                        continue;
                }
6036                 count = info->ranges[i].id_end - info->ranges[i].id_base + 1;
6037                 switch (info->ranges[i].type) {
6038                 case RTE_ETH_REPRESENTOR_PF:
6039                         if (pf < info->ranges[i].pf ||
6040                             pf >= info->ranges[i].pf + count)
6041                                 continue;
6042                         *repr_id = info->ranges[i].id_base +
6043                                    (pf - info->ranges[i].pf);
6044                         ret = 0;
6045                         goto out;
6046                 case RTE_ETH_REPRESENTOR_VF:
6047                         if (info->ranges[i].pf != pf)
6048                                 continue;
6049                         if (representor_port < info->ranges[i].vf ||
6050                             representor_port >= info->ranges[i].vf + count)
6051                                 continue;
6052                         *repr_id = info->ranges[i].id_base +
6053                                    (representor_port - info->ranges[i].vf);
6054                         ret = 0;
6055                         goto out;
6056                 case RTE_ETH_REPRESENTOR_SF:
6057                         if (info->ranges[i].pf != pf)
6058                                 continue;
6059                         if (representor_port < info->ranges[i].sf ||
6060                             representor_port >= info->ranges[i].sf + count)
6061                                 continue;
6062                         *repr_id = info->ranges[i].id_base +
6063                               (representor_port - info->ranges[i].sf);
6064                         ret = 0;
6065                         goto out;
6066                 default:
6067                         break;
6068                 }
6069         }
6070 out:
6071         free(info);
6072         return ret;
6073 }
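
/*
 * Usage sketch (illustrative): resolving the representor ID of VF 2,
 * letting controller and PF default to the backing device's own values
 * (that is what the -1 arguments request). The device chosen here is
 * hypothetical.
 */
#if 0 /* usage sketch */
        const struct rte_eth_dev *ethdev = &rte_eth_devices[0]; /* hypothetical */
        uint16_t repr_id;

        if (rte_eth_representor_id_get(ethdev, RTE_ETH_REPRESENTOR_VF,
                                       -1, -1, 2, &repr_id) == 0)
                /* repr_id now maps VF 2 inside the matching VF range */;
#endif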
6074
6075 static int
6076 eth_dev_handle_port_list(const char *cmd __rte_unused,
6077                 const char *params __rte_unused,
6078                 struct rte_tel_data *d)
6079 {
6080         int port_id;
6081
6082         rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
6083         RTE_ETH_FOREACH_DEV(port_id)
6084                 rte_tel_data_add_array_int(d, port_id);
6085         return 0;
6086 }
6087
6088 static void
6089 eth_dev_add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats,
6090                 const char *stat_name)
6091 {
6092         int q;
        struct rte_tel_data *q_data = rte_tel_data_alloc();

        /* Sanity check: skip the per-queue array if allocation failed. */
        if (q_data == NULL)
                return;
        rte_tel_data_start_array(q_data, RTE_TEL_U64_VAL);
6095         for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++)
6096                 rte_tel_data_add_array_u64(q_data, q_stats[q]);
6097         rte_tel_data_add_dict_container(d, stat_name, q_data, 0);
6098 }
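
/*
 * The helper above nests one fixed-size u64 array per queue counter into
 * the stats dictionary. An illustrative fragment of the resulting JSON
 * (RTE_ETHDEV_QUEUE_STAT_CNTRS elements per array, values made up):
 *
 *     "q_ipackets": [42, 0, 0, ...], "q_errors": [0, 0, 0, ...]
 */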
6099
6100 #define ADD_DICT_STAT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s)
6101
6102 static int
6103 eth_dev_handle_port_stats(const char *cmd __rte_unused,
6104                 const char *params,
6105                 struct rte_tel_data *d)
6106 {
6107         struct rte_eth_stats stats;
6108         int port_id, ret;
6109
6110         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
6111                 return -1;
6112
6113         port_id = atoi(params);
6114         if (!rte_eth_dev_is_valid_port(port_id))
6115                 return -1;
6116
6117         ret = rte_eth_stats_get(port_id, &stats);
6118         if (ret < 0)
6119                 return -1;
6120
6121         rte_tel_data_start_dict(d);
6122         ADD_DICT_STAT(stats, ipackets);
6123         ADD_DICT_STAT(stats, opackets);
6124         ADD_DICT_STAT(stats, ibytes);
6125         ADD_DICT_STAT(stats, obytes);
6126         ADD_DICT_STAT(stats, imissed);
6127         ADD_DICT_STAT(stats, ierrors);
6128         ADD_DICT_STAT(stats, oerrors);
6129         ADD_DICT_STAT(stats, rx_nombuf);
6130         eth_dev_add_port_queue_stats(d, stats.q_ipackets, "q_ipackets");
6131         eth_dev_add_port_queue_stats(d, stats.q_opackets, "q_opackets");
6132         eth_dev_add_port_queue_stats(d, stats.q_ibytes, "q_ibytes");
6133         eth_dev_add_port_queue_stats(d, stats.q_obytes, "q_obytes");
6134         eth_dev_add_port_queue_stats(d, stats.q_errors, "q_errors");
6135
6136         return 0;
6137 }
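
/*
 * Illustrative exchange for the handler above through the telemetry
 * socket, e.g. with usertools/dpdk-telemetry.py (values made up):
 *
 *     --> /ethdev/stats,0
 *     {"/ethdev/stats": {"ipackets": 42, "opackets": 17, "ibytes": 5376,
 *      "obytes": 2176, "imissed": 0, "ierrors": 0, "oerrors": 0,
 *      "rx_nombuf": 0, "q_ipackets": [42, 0, ...], ...}}
 */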
6138
6139 static int
6140 eth_dev_handle_port_xstats(const char *cmd __rte_unused,
6141                 const char *params,
6142                 struct rte_tel_data *d)
6143 {
6144         struct rte_eth_xstat *eth_xstats;
6145         struct rte_eth_xstat_name *xstat_names;
6146         int port_id, num_xstats;
6147         int i, ret;
6148         char *end_param;
6149
6150         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
6151                 return -1;
6152
6153         port_id = strtoul(params, &end_param, 0);
6154         if (*end_param != '\0')
                RTE_ETHDEV_LOG(NOTICE,
                        "Extra parameters passed to ethdev telemetry command, ignoring\n");
6157         if (!rte_eth_dev_is_valid_port(port_id))
6158                 return -1;
6159
6160         num_xstats = rte_eth_xstats_get(port_id, NULL, 0);
6161         if (num_xstats < 0)
6162                 return -1;
6163
6164         /* use one malloc for both names and stats */
6165         eth_xstats = malloc((sizeof(struct rte_eth_xstat) +
6166                         sizeof(struct rte_eth_xstat_name)) * num_xstats);
6167         if (eth_xstats == NULL)
6168                 return -1;
6169         xstat_names = (void *)&eth_xstats[num_xstats];
6170
6171         ret = rte_eth_xstats_get_names(port_id, xstat_names, num_xstats);
6172         if (ret < 0 || ret > num_xstats) {
6173                 free(eth_xstats);
6174                 return -1;
6175         }
6176
6177         ret = rte_eth_xstats_get(port_id, eth_xstats, num_xstats);
6178         if (ret < 0 || ret > num_xstats) {
6179                 free(eth_xstats);
6180                 return -1;
6181         }
6182
6183         rte_tel_data_start_dict(d);
6184         for (i = 0; i < num_xstats; i++)
6185                 rte_tel_data_add_dict_u64(d, xstat_names[i].name,
6186                                 eth_xstats[i].value);
        free(eth_xstats);
        return 0;
6188 }
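
/*
 * The handler above uses the usual ethdev sizing idiom: a first call with
 * a NULL buffer only returns the entry count. A minimal sketch of the same
 * idiom in isolation (port_id is a hypothetical valid port):
 */
#if 0 /* usage sketch */
        uint16_t port_id = 0;
        struct rte_eth_xstat *xs;
        int n;

        n = rte_eth_xstats_get(port_id, NULL, 0);
        if (n < 0)
                return n;
        xs = malloc(n * sizeof(*xs));
        if (xs == NULL)
                return -ENOMEM;
        n = rte_eth_xstats_get(port_id, xs, n); /* second call fills xs */
        free(xs);
#endif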
6189
6190 static int
6191 eth_dev_handle_port_link_status(const char *cmd __rte_unused,
6192                 const char *params,
6193                 struct rte_tel_data *d)
6194 {
6195         static const char *status_str = "status";
6196         int ret, port_id;
6197         struct rte_eth_link link;
6198         char *end_param;
6199
6200         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
6201                 return -1;
6202
6203         port_id = strtoul(params, &end_param, 0);
6204         if (*end_param != '\0')
                RTE_ETHDEV_LOG(NOTICE,
                        "Extra parameters passed to ethdev telemetry command, ignoring\n");
6207         if (!rte_eth_dev_is_valid_port(port_id))
6208                 return -1;
6209
6210         ret = rte_eth_link_get_nowait(port_id, &link);
6211         if (ret < 0)
6212                 return -1;
6213
6214         rte_tel_data_start_dict(d);
6215         if (!link.link_status) {
6216                 rte_tel_data_add_dict_string(d, status_str, "DOWN");
6217                 return 0;
6218         }
6219         rte_tel_data_add_dict_string(d, status_str, "UP");
6220         rte_tel_data_add_dict_u64(d, "speed", link.link_speed);
6221         rte_tel_data_add_dict_string(d, "duplex",
6222                         (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
6223                                 "full-duplex" : "half-duplex");
6224         return 0;
6225 }
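
/*
 * Illustrative responses from the handler above (values made up):
 *
 *     {"/ethdev/link_status": {"status": "DOWN"}}
 *     {"/ethdev/link_status": {"status": "UP", "speed": 10000,
 *      "duplex": "full-duplex"}}
 *
 * link_speed is reported in Mb/s, so 10000 is a 10 Gb/s link.
 */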
6226
6227 int
6228 rte_eth_hairpin_queue_peer_update(uint16_t peer_port, uint16_t peer_queue,
6229                                   struct rte_hairpin_peer_info *cur_info,
6230                                   struct rte_hairpin_peer_info *peer_info,
6231                                   uint32_t direction)
6232 {
6233         struct rte_eth_dev *dev;
6234
        /* Current queue information is optional; peer info is mandatory. */
6236         if (peer_info == NULL)
6237                 return -EINVAL;
6238
6239         /* No need to check the validity again. */
6240         dev = &rte_eth_devices[peer_port];
6241         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_update,
6242                                 -ENOTSUP);
6243
6244         return (*dev->dev_ops->hairpin_queue_peer_update)(dev, peer_queue,
6245                                         cur_info, peer_info, direction);
6246 }
6247
6248 int
6249 rte_eth_hairpin_queue_peer_bind(uint16_t cur_port, uint16_t cur_queue,
6250                                 struct rte_hairpin_peer_info *peer_info,
6251                                 uint32_t direction)
6252 {
6253         struct rte_eth_dev *dev;
6254
6255         if (peer_info == NULL)
6256                 return -EINVAL;
6257
6258         /* No need to check the validity again. */
6259         dev = &rte_eth_devices[cur_port];
6260         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_bind,
6261                                 -ENOTSUP);
6262
6263         return (*dev->dev_ops->hairpin_queue_peer_bind)(dev, cur_queue,
6264                                                         peer_info, direction);
6265 }
6266
6267 int
6268 rte_eth_hairpin_queue_peer_unbind(uint16_t cur_port, uint16_t cur_queue,
6269                                   uint32_t direction)
6270 {
6271         struct rte_eth_dev *dev;
6272
6273         /* No need to check the validity again. */
6274         dev = &rte_eth_devices[cur_port];
6275         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_unbind,
6276                                 -ENOTSUP);
6277
6278         return (*dev->dev_ops->hairpin_queue_peer_unbind)(dev, cur_queue,
6279                                                           direction);
6280 }
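
/*
 * Rough sketch of how a PMD might chain the three hairpin helpers above
 * when binding a local queue to a peer queue (simplified; a real driver
 * adds rollback and per-direction handling, and all values here are
 * hypothetical):
 */
#if 0 /* usage sketch */
        uint16_t cur_port = 0, peer_port = 1;   /* hypothetical ports */
        uint16_t cur_queue = 0, peer_queue = 0; /* hypothetical queues */
        uint32_t direction = 1;                 /* e.g. Tx-to-Rx */
        struct rte_hairpin_peer_info cur = { 0 }, peer = { 0 };
        int ret;

        /* Ask the peer port to fill in/validate its queue information. */
        ret = rte_eth_hairpin_queue_peer_update(peer_port, peer_queue,
                                                &cur, &peer, direction);
        if (ret == 0)
                ret = rte_eth_hairpin_queue_peer_bind(cur_port, cur_queue,
                                                      &peer, direction);
        if (ret != 0)
                rte_eth_hairpin_queue_peer_unbind(cur_port, cur_queue,
                                                  direction);
#endif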
6281
6282 int
6283 rte_eth_representor_info_get(uint16_t port_id,
6284                              struct rte_eth_representor_info *info)
6285 {
6286         struct rte_eth_dev *dev;
6287
6288         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6289         dev = &rte_eth_devices[port_id];
6290
6291         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->representor_info_get, -ENOTSUP);
6292         return eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev, info));
6293 }
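
/*
 * Usage sketch (illustrative): like rte_eth_representor_id_get() above,
 * callers size the flexible ranges[] array from a first call with a NULL
 * info pointer (port_id is a hypothetical valid port):
 */
#if 0 /* usage sketch */
        uint16_t port_id = 0;
        struct rte_eth_representor_info *info;
        int n;

        n = rte_eth_representor_info_get(port_id, NULL);
        if (n < 0)
                return n;
        info = calloc(1, sizeof(*info) + n * sizeof(info->ranges[0]));
        if (info == NULL)
                return -ENOMEM;
        n = rte_eth_representor_info_get(port_id, info);
        /* ... inspect info->ranges[0..n-1] ... */
        free(info);
#endif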
6294
6295 RTE_LOG_REGISTER(rte_eth_dev_logtype, lib.ethdev, INFO);
6296
6297 RTE_INIT(ethdev_init_telemetry)
6298 {
6299         rte_telemetry_register_cmd("/ethdev/list", eth_dev_handle_port_list,
6300                         "Returns list of available ethdev ports. Takes no parameters");
6301         rte_telemetry_register_cmd("/ethdev/stats", eth_dev_handle_port_stats,
6302                         "Returns the common stats for a port. Parameters: int port_id");
6303         rte_telemetry_register_cmd("/ethdev/xstats", eth_dev_handle_port_xstats,
6304                         "Returns the extended stats for a port. Parameters: int port_id");
6305         rte_telemetry_register_cmd("/ethdev/link_status",
6306                         eth_dev_handle_port_link_status,
6307                         "Returns the link status for a port. Parameters: int port_id");
6308 }
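
/*
 * The commands registered above can be exercised over the telemetry
 * socket, e.g. with usertools/dpdk-telemetry.py (two probed ports shown,
 * values made up):
 *
 *     --> /ethdev/list
 *     {"/ethdev/list": [0, 1]}
 *     --> /ethdev/xstats,0
 *     {"/ethdev/xstats": {"rx_good_packets": 42, ...}}
 */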