d75a3b88061c4dc05e11226af4b2ef0722c4fbbd
[dpdk.git] / lib / ethdev / rte_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4
5 #include <ctype.h>
6 #include <errno.h>
7 #include <inttypes.h>
8 #include <stdbool.h>
9 #include <stdint.h>
10 #include <stdlib.h>
11 #include <string.h>
12 #include <sys/queue.h>
13
14 #include <rte_byteorder.h>
15 #include <rte_log.h>
16 #include <rte_debug.h>
17 #include <rte_interrupts.h>
18 #include <rte_memory.h>
19 #include <rte_memcpy.h>
20 #include <rte_memzone.h>
21 #include <rte_launch.h>
22 #include <rte_eal.h>
23 #include <rte_per_lcore.h>
24 #include <rte_lcore.h>
25 #include <rte_branch_prediction.h>
26 #include <rte_common.h>
27 #include <rte_mempool.h>
28 #include <rte_malloc.h>
29 #include <rte_mbuf.h>
30 #include <rte_errno.h>
31 #include <rte_spinlock.h>
32 #include <rte_string_fns.h>
33 #include <rte_kvargs.h>
34 #include <rte_class.h>
35 #include <rte_ether.h>
36 #include <rte_telemetry.h>
37
38 #include "rte_ethdev_trace.h"
39 #include "rte_ethdev.h"
40 #include "ethdev_driver.h"
41 #include "ethdev_profile.h"
42 #include "ethdev_private.h"
43
/* Name of the memzone that carries ethdev data shared across processes */
static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
/* Per-process table of ethdev ports, indexed by port id */
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* spinlock for eth device callbacks */
static rte_spinlock_t eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove rx callbacks */
static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove tx callbacks */
static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for shared data allocation */
static rte_spinlock_t eth_dev_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and its offset in stats structure  */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset; /* byte offset of the counter in struct rte_eth_stats */
};

/* Shared memory between primary and secondary processes. */
static struct {
	uint64_t next_owner_id;            /* next owner id to hand out; monotonically increasing */
	rte_spinlock_t ownership_lock;     /* protects port ownership and the data[] name fields */
	struct rte_eth_dev_data data[RTE_MAX_ETHPORTS]; /* per-port data, indexed by port id */
} *eth_dev_shared_data; /* points into the MZ_RTE_ETH_DEV_DATA memzone; NULL until prepared */
71
/* Names and offsets of the device-level basic statistics exposed as xstats */
static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
		rx_nombuf)},
};

#define RTE_NB_STATS RTE_DIM(eth_dev_stats_strings)

/* Names and offsets of the per-Rx-queue basic statistics (arrays in rte_eth_stats) */
static const struct rte_eth_xstats_name_off eth_dev_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS RTE_DIM(eth_dev_rxq_stats_strings)

/* Names and offsets of the per-Tx-queue basic statistics */
static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)
99
/* Map a DEV_RX_OFFLOAD_* flag to its printable name */
#define RTE_RX_OFFLOAD_BIT2STR(_name)	\
	{ DEV_RX_OFFLOAD_##_name, #_name }

/* Same, for flags only available under the RTE_ETH_RX_OFFLOAD_* prefix */
#define RTE_ETH_RX_OFFLOAD_BIT2STR(_name)	\
	{ RTE_ETH_RX_OFFLOAD_##_name, #_name }

/* Rx offload flag -> name table, used when printing offload capabilities */
static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_rx_offload_names[] = {
	RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
	RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
	RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
	RTE_RX_OFFLOAD_BIT2STR(SCATTER),
	RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
	RTE_RX_OFFLOAD_BIT2STR(SECURITY),
	RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
	RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
	RTE_ETH_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
};

#undef RTE_RX_OFFLOAD_BIT2STR
#undef RTE_ETH_RX_OFFLOAD_BIT2STR
134
/* Map a DEV_TX_OFFLOAD_* flag to its printable name */
#define RTE_TX_OFFLOAD_BIT2STR(_name)	\
	{ DEV_TX_OFFLOAD_##_name, #_name }

/* Tx offload flag -> name table, used when printing offload capabilities */
static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_tx_offload_names[] = {
	RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
	RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
	RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
	RTE_TX_OFFLOAD_BIT2STR(SECURITY),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
};

#undef RTE_TX_OFFLOAD_BIT2STR
167
/**
 * The user application callback description.
 *
 * It contains callback address to be registered by user application,
 * the pointer to the parameters for callback, and the event type.
 */
struct rte_eth_dev_callback {
	TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
	rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
	void *cb_arg;                           /**< Parameter for callback */
	void *ret_param;                        /**< Return parameter */
	enum rte_eth_event_type event;          /**< Interrupt event type */
	uint32_t active;                        /**< Callback is executing */
};

/* Direction selector for queue->statistics-counter mapping */
enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};
187
/*
 * Initialize an ethdev iterator from a devargs-style description string.
 *
 * Accepted syntaxes (see the comment below): plain bus address, bus-prefixed
 * address, pure class filter ("class=eth,..."), or the full new syntax.
 * On success the iterator owns heap copies of the bus and class strings
 * (freed by rte_eth_iterator_cleanup()), except in the pure class-filter
 * case where cls_str aliases the caller's devargs_str and bus_str stays NULL.
 *
 * Returns 0 on success, negative errno on failure.
 */
int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
	int ret;
	struct rte_devargs devargs;
	const char *bus_param_key;
	char *bus_str = NULL;
	char *cls_str = NULL;
	int str_size;

	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL iterator\n");
		return -EINVAL;
	}

	if (devargs_str == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot initialize iterator from NULL device description string\n");
		return -EINVAL;
	}

	memset(iter, 0, sizeof(*iter));
	memset(&devargs, 0, sizeof(devargs));

	/*
	 * The devargs string may use various syntaxes:
	 *   - 0000:08:00.0,representor=[1-3]
	 *   - pci:0000:06:00.0,representor=[0,5]
	 *   - class=eth,mac=00:11:22:33:44:55
	 *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
	 */

	/*
	 * Handle pure class filter (i.e. without any bus-level argument),
	 * from future new syntax.
	 * rte_devargs_parse() is not yet supporting the new syntax,
	 * that's why this simple case is temporarily parsed here.
	 */
#define iter_anybus_str "class=eth,"
	if (strncmp(devargs_str, iter_anybus_str,
			strlen(iter_anybus_str)) == 0) {
		/* cls_str aliases the caller's string; nothing to free later */
		iter->cls_str = devargs_str + strlen(iter_anybus_str);
		goto end;
	}

	/* Split bus, device and parameters. */
	ret = rte_devargs_parse(&devargs, devargs_str);
	if (ret != 0)
		goto error;

	/*
	 * Assume parameters of old syntax can match only at ethdev level.
	 * Extra parameters will be ignored, thanks to "+" prefix.
	 */
	str_size = strlen(devargs.args) + 2; /* "+" prefix + NUL */
	cls_str = malloc(str_size);
	if (cls_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(cls_str, str_size, "+%s", devargs.args);
	if (ret != str_size - 1) {
		/* snprintf wrote a different length than computed: bail out */
		ret = -EINVAL;
		goto error;
	}
	iter->cls_str = cls_str;

	iter->bus = devargs.bus;
	if (iter->bus->dev_iterate == NULL) {
		ret = -ENOTSUP;
		goto error;
	}

	/* Convert bus args to new syntax for use with new API dev_iterate. */
	if ((strcmp(iter->bus->name, "vdev") == 0) ||
		(strcmp(iter->bus->name, "fslmc") == 0) ||
		(strcmp(iter->bus->name, "dpaa_bus") == 0)) {
		bus_param_key = "name";
	} else if (strcmp(iter->bus->name, "pci") == 0) {
		bus_param_key = "addr";
	} else {
		ret = -ENOTSUP;
		goto error;
	}
	str_size = strlen(bus_param_key) + strlen(devargs.name) + 2; /* "=" + NUL */
	bus_str = malloc(str_size);
	if (bus_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(bus_str, str_size, "%s=%s",
			bus_param_key, devargs.name);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->bus_str = bus_str;

end:
	iter->cls = rte_class_find_by_name("eth");
	rte_devargs_reset(&devargs);
	return 0;

error:
	/* iter->bus is only set once devargs were parsed, so it is valid here */
	if (ret == -ENOTSUP)
		RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n",
				iter->bus->name);
	rte_devargs_reset(&devargs);
	free(bus_str);
	free(cls_str);
	return ret;
}
300
/*
 * Return the next matching port id for this iterator, or RTE_MAX_ETHPORTS
 * when iteration is over (in which case the iterator is cleaned up).
 *
 * For a bus-filtered iterator, the outer loop walks rte_device candidates
 * via the bus dev_iterate callback, and for each of them the ethdev class
 * iterator is resumed until exhausted.
 */
uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get next device from NULL iterator\n");
		return RTE_MAX_ETHPORTS;
	}

	if (iter->cls == NULL) /* invalid ethdev iterator */
		return RTE_MAX_ETHPORTS;

	do { /* loop to try all matching rte_device */
		/* If not pure ethdev filter and */
		if (iter->bus != NULL &&
				/* not in middle of rte_eth_dev iteration, */
				iter->class_device == NULL) {
			/* get next rte_device to try. */
			iter->device = iter->bus->dev_iterate(
					iter->device, iter->bus_str, iter);
			if (iter->device == NULL)
				break; /* no more rte_device candidate */
		}
		/* A device is matching bus part, need to check ethdev part. */
		iter->class_device = iter->cls->dev_iterate(
				iter->class_device, iter->cls_str, iter);
		if (iter->class_device != NULL)
			return eth_dev_to_id(iter->class_device); /* match */
	} while (iter->bus != NULL); /* need to try next rte_device */

	/* No more ethdev port to iterate. */
	rte_eth_iterator_cleanup(iter);
	return RTE_MAX_ETHPORTS;
}
335
/*
 * Free the heap strings owned by the iterator and reset it.
 * A pure class filter (bus_str == NULL) owns nothing: its cls_str aliases
 * the caller's devargs string, so it must not be freed.
 */
void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot do clean up from NULL iterator\n");
		return;
	}

	if (iter->bus_str == NULL)
		return; /* nothing to free in pure class filter */
	free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
	free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
	memset(iter, 0, sizeof(*iter));
}
350
351 uint16_t
352 rte_eth_find_next(uint16_t port_id)
353 {
354         while (port_id < RTE_MAX_ETHPORTS &&
355                         rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
356                 port_id++;
357
358         if (port_id >= RTE_MAX_ETHPORTS)
359                 return RTE_MAX_ETHPORTS;
360
361         return port_id;
362 }
363
/*
 * Macro to iterate over all valid ports for internal usage.
 * Note: RTE_ETH_FOREACH_DEV is different because filtering owned ports.
 * Iterates every non-UNUSED port, regardless of owner.
 */
#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
	for (port_id = rte_eth_find_next(0); \
	     port_id < RTE_MAX_ETHPORTS; \
	     port_id = rte_eth_find_next(port_id + 1))
372
373 uint16_t
374 rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
375 {
376         port_id = rte_eth_find_next(port_id);
377         while (port_id < RTE_MAX_ETHPORTS &&
378                         rte_eth_devices[port_id].device != parent)
379                 port_id = rte_eth_find_next(port_id + 1);
380
381         return port_id;
382 }
383
/*
 * Return the next valid port sharing the same rte_device as ref_port_id,
 * starting the search at port_id. Returns RTE_MAX_ETHPORTS when ref_port_id
 * is invalid or no sibling is found.
 */
uint16_t
rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
	return rte_eth_find_next_of(port_id,
			rte_eth_devices[ref_port_id].device);
}
391
/*
 * Lazily map the process-shared ethdev data.
 *
 * The primary process reserves the memzone and initializes the ownership
 * state; secondary processes look the memzone up. Panics if the memzone
 * cannot be reserved/found (ethdev cannot work without it). Serialized by
 * eth_dev_shared_data_lock; safe to call repeatedly.
 */
static void
eth_dev_shared_data_prepare(void)
{
	const unsigned flags = 0;
	const struct rte_memzone *mz;

	rte_spinlock_lock(&eth_dev_shared_data_lock);

	if (eth_dev_shared_data == NULL) {
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			/* Allocate port data and ownership shared memory. */
			mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
					sizeof(*eth_dev_shared_data),
					rte_socket_id(), flags);
		} else
			mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
		if (mz == NULL)
			rte_panic("Cannot allocate ethdev shared data\n");

		eth_dev_shared_data = mz->addr;
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			/* Owner id 0 is RTE_ETH_DEV_NO_OWNER; start above it */
			eth_dev_shared_data->next_owner_id =
					RTE_ETH_DEV_NO_OWNER + 1;
			rte_spinlock_init(&eth_dev_shared_data->ownership_lock);
			memset(eth_dev_shared_data->data, 0,
			       sizeof(eth_dev_shared_data->data));
		}
	}

	rte_spinlock_unlock(&eth_dev_shared_data_lock);
}
423
/* A port slot is in use iff its shared name field has been set. */
static bool
eth_dev_is_allocated(const struct rte_eth_dev *ethdev)
{
	return ethdev->data->name[0] != '\0';
}
429
430 static struct rte_eth_dev *
431 eth_dev_allocated(const char *name)
432 {
433         uint16_t i;
434
435         RTE_BUILD_BUG_ON(RTE_MAX_ETHPORTS >= UINT16_MAX);
436
437         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
438                 if (rte_eth_devices[i].data != NULL &&
439                     strcmp(rte_eth_devices[i].data->name, name) == 0)
440                         return &rte_eth_devices[i];
441         }
442         return NULL;
443 }
444
445 struct rte_eth_dev *
446 rte_eth_dev_allocated(const char *name)
447 {
448         struct rte_eth_dev *ethdev;
449
450         eth_dev_shared_data_prepare();
451
452         rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
453
454         ethdev = eth_dev_allocated(name);
455
456         rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
457
458         return ethdev;
459 }
460
461 static uint16_t
462 eth_dev_find_free_port(void)
463 {
464         uint16_t i;
465
466         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
467                 /* Using shared name field to find a free port. */
468                 if (eth_dev_shared_data->data[i].name[0] == '\0') {
469                         RTE_ASSERT(rte_eth_devices[i].state ==
470                                    RTE_ETH_DEV_UNUSED);
471                         return i;
472                 }
473         }
474         return RTE_MAX_ETHPORTS;
475 }
476
477 static struct rte_eth_dev *
478 eth_dev_get(uint16_t port_id)
479 {
480         struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
481
482         eth_dev->data = &eth_dev_shared_data->data[port_id];
483
484         return eth_dev;
485 }
486
/*
 * Allocate a new ethdev port with the given unique name.
 *
 * Validates the name length, takes the shared ownership lock, rejects
 * duplicate names, picks the lowest free port id and initializes the
 * shared data for it (name, port id, default MTU, flow-ops mutex).
 * Returns the device, or NULL on any failure (name invalid/duplicate,
 * no free port).
 */
struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
	uint16_t port_id;
	struct rte_eth_dev *eth_dev = NULL;
	size_t name_len;

	name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
	if (name_len == 0) {
		RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n");
		return NULL;
	}

	if (name_len >= RTE_ETH_NAME_MAX_LEN) {
		RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n");
		return NULL;
	}

	eth_dev_shared_data_prepare();

	/* Synchronize port creation between primary and secondary threads. */
	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	if (eth_dev_allocated(name) != NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Ethernet device with name %s already allocated\n",
			name);
		goto unlock;
	}

	port_id = eth_dev_find_free_port();
	if (port_id == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG(ERR,
			"Reached maximum number of Ethernet ports\n");
		goto unlock;
	}

	eth_dev = eth_dev_get(port_id);
	/* Setting the name marks the slot as used (see eth_dev_find_free_port) */
	strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
	eth_dev->data->port_id = port_id;
	eth_dev->data->mtu = RTE_ETHER_MTU;
	/* NOTE(review): pthread_mutex_init return value is ignored — with
	 * default attributes it does not fail on Linux, but worth confirming.
	 */
	pthread_mutex_init(&eth_dev->data->flow_ops_mutex, NULL);

unlock:
	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return eth_dev;
}
535
/*
 * Attach to a port already registered by the primary process, which
 * makes sure that the same device would have the same port id both
 * in the primary and secondary process.
 *
 * Looks the port up by name in the shared data; returns NULL if the
 * primary process has not created it.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
	uint16_t i;
	struct rte_eth_dev *eth_dev = NULL;

	eth_dev_shared_data_prepare();

	/* Synchronize port attachment to primary port creation and release. */
	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (strcmp(eth_dev_shared_data->data[i].name, name) == 0)
			break;
	}
	if (i == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG(ERR,
			"Device %s is not driven by the primary process\n",
			name);
	} else {
		eth_dev = eth_dev_get(i);
		RTE_ASSERT(eth_dev->data->port_id == i);
	}

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return eth_dev;
}
568
/*
 * Release an ethdev port back to the free pool.
 *
 * Fires the DESTROY callback (outside the ownership lock) if the port was
 * still in use, clears all per-process function pointers and state, and —
 * in the primary process only — frees the queue arrays, MAC tables and
 * private data, then wipes the shared data so the slot reads as free.
 * Returns 0, or -EINVAL for a NULL device.
 */
int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
	if (eth_dev == NULL)
		return -EINVAL;

	eth_dev_shared_data_prepare();

	if (eth_dev->state != RTE_ETH_DEV_UNUSED)
		rte_eth_dev_callback_process(eth_dev,
				RTE_ETH_EVENT_DESTROY, NULL);

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	eth_dev->state = RTE_ETH_DEV_UNUSED;
	eth_dev->device = NULL;
	eth_dev->process_private = NULL;
	eth_dev->intr_handle = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->tx_pkt_prepare = NULL;
	eth_dev->rx_queue_count = NULL;
	eth_dev->rx_descriptor_done = NULL;
	eth_dev->rx_descriptor_status = NULL;
	eth_dev->tx_descriptor_status = NULL;
	eth_dev->dev_ops = NULL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_free(eth_dev->data->rx_queues);
		rte_free(eth_dev->data->tx_queues);
		rte_free(eth_dev->data->mac_addrs);
		rte_free(eth_dev->data->hash_mac_addrs);
		rte_free(eth_dev->data->dev_private);
		pthread_mutex_destroy(&eth_dev->data->flow_ops_mutex);
		/* Zeroing the shared data clears the name, freeing the slot */
		memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
	}

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return 0;
}
610
611 int
612 rte_eth_dev_is_valid_port(uint16_t port_id)
613 {
614         if (port_id >= RTE_MAX_ETHPORTS ||
615             (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
616                 return 0;
617         else
618                 return 1;
619 }
620
621 static int
622 eth_is_valid_owner_id(uint64_t owner_id)
623 {
624         if (owner_id == RTE_ETH_DEV_NO_OWNER ||
625             eth_dev_shared_data->next_owner_id <= owner_id)
626                 return 0;
627         return 1;
628 }
629
630 uint64_t
631 rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
632 {
633         port_id = rte_eth_find_next(port_id);
634         while (port_id < RTE_MAX_ETHPORTS &&
635                         rte_eth_devices[port_id].data->owner.id != owner_id)
636                 port_id = rte_eth_find_next(port_id + 1);
637
638         return port_id;
639 }
640
/*
 * Hand out a fresh, unique owner id from the process-shared counter.
 * Returns 0 on success, -EINVAL when owner_id is NULL.
 */
int
rte_eth_dev_owner_new(uint64_t *owner_id)
{
	if (owner_id == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get new owner ID to NULL\n");
		return -EINVAL;
	}

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	*owner_id = eth_dev_shared_data->next_owner_id++;

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return 0;
}
658
659 static int
660 eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
661                        const struct rte_eth_dev_owner *new_owner)
662 {
663         struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
664         struct rte_eth_dev_owner *port_owner;
665
666         if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
667                 RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
668                         port_id);
669                 return -ENODEV;
670         }
671
672         if (new_owner == NULL) {
673                 RTE_ETHDEV_LOG(ERR,
674                         "Cannot set ethdev port %u owner from NULL owner\n",
675                         port_id);
676                 return -EINVAL;
677         }
678
679         if (!eth_is_valid_owner_id(new_owner->id) &&
680             !eth_is_valid_owner_id(old_owner_id)) {
681                 RTE_ETHDEV_LOG(ERR,
682                         "Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
683                        old_owner_id, new_owner->id);
684                 return -EINVAL;
685         }
686
687         port_owner = &rte_eth_devices[port_id].data->owner;
688         if (port_owner->id != old_owner_id) {
689                 RTE_ETHDEV_LOG(ERR,
690                         "Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
691                         port_id, port_owner->name, port_owner->id);
692                 return -EPERM;
693         }
694
695         /* can not truncate (same structure) */
696         strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);
697
698         port_owner->id = new_owner->id;
699
700         RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
701                 port_id, new_owner->name, new_owner->id);
702
703         return 0;
704 }
705
706 int
707 rte_eth_dev_owner_set(const uint16_t port_id,
708                       const struct rte_eth_dev_owner *owner)
709 {
710         int ret;
711
712         eth_dev_shared_data_prepare();
713
714         rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
715
716         ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);
717
718         rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
719         return ret;
720 }
721
722 int
723 rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
724 {
725         const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
726                         {.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
727         int ret;
728
729         eth_dev_shared_data_prepare();
730
731         rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
732
733         ret = eth_dev_owner_set(port_id, owner_id, &new_owner);
734
735         rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
736         return ret;
737 }
738
/*
 * Clear ownership of every port owned by owner_id.
 * Returns 0 on success, -EINVAL when owner_id was never issued.
 */
int
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
	uint16_t port_id;
	int ret = 0;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	if (eth_is_valid_owner_id(owner_id)) {
		/* Walk every slot (not only valid ports) so stale owner
		 * records are cleared too.
		 */
		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
			if (rte_eth_devices[port_id].data->owner.id == owner_id)
				memset(&rte_eth_devices[port_id].data->owner, 0,
				       sizeof(struct rte_eth_dev_owner));
		RTE_ETHDEV_LOG(NOTICE,
			"All port owners owned by %016"PRIx64" identifier have removed\n",
			owner_id);
	} else {
		RTE_ETHDEV_LOG(ERR,
			       "Invalid owner id=%016"PRIx64"\n",
			       owner_id);
		ret = -EINVAL;
	}

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return ret;
}
768
/*
 * Copy the owner record of a port into *owner, under the ownership lock.
 * Returns 0 on success, -ENODEV for an invalid/unallocated port,
 * -EINVAL when owner is NULL.
 */
int
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
	struct rte_eth_dev *ethdev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ethdev = &rte_eth_devices[port_id];

	if (!eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (owner == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u owner to NULL\n",
			port_id);
		return -EINVAL;
	}

	eth_dev_shared_data_prepare();

	/* Copy under the lock so name and id are read consistently */
	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
	rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return 0;
}
797
/*
 * Return the NUMA node of the port's device, or -1 for an invalid port.
 * NOTE(review): -1 is also a possible numa_node value (SOCKET_ID_ANY),
 * so the error return is not distinguishable from it here.
 */
int
rte_eth_dev_socket_id(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
	return rte_eth_devices[port_id].data->numa_node;
}
804
/*
 * Return the port's security context pointer, or NULL for an invalid port.
 * The context may itself be NULL for ports without security support.
 */
void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
	return rte_eth_devices[port_id].security_ctx;
}
811
812 uint16_t
813 rte_eth_dev_count_avail(void)
814 {
815         uint16_t p;
816         uint16_t count;
817
818         count = 0;
819
820         RTE_ETH_FOREACH_DEV(p)
821                 count++;
822
823         return count;
824 }
825
826 uint16_t
827 rte_eth_dev_count_total(void)
828 {
829         uint16_t port, count = 0;
830
831         RTE_ETH_FOREACH_VALID_DEV(port)
832                 count++;
833
834         return count;
835 }
836
/*
 * Copy the port's name into the caller-provided buffer.
 * NOTE(review): the copy is an unbounded strcpy — the caller's buffer is
 * presumably at least RTE_ETH_NAME_MAX_LEN bytes (the shared name field's
 * size); confirm against the API contract.
 * Returns 0 on success, -ENODEV/-EINVAL on bad arguments.
 */
int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
	char *tmp;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u name to NULL\n",
			port_id);
		return -EINVAL;
	}

	/* shouldn't check 'rte_eth_devices[i].data',
	 * because it might be overwritten by VDEV PMD */
	tmp = eth_dev_shared_data->data[port_id].name;
	strcpy(name, tmp);
	return 0;
}
856
857 int
858 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
859 {
860         uint16_t pid;
861
862         if (name == NULL) {
863                 RTE_ETHDEV_LOG(ERR, "Cannot get port ID from NULL name");
864                 return -EINVAL;
865         }
866
867         if (port_id == NULL) {
868                 RTE_ETHDEV_LOG(ERR,
869                         "Cannot get port ID to NULL for %s\n", name);
870                 return -EINVAL;
871         }
872
873         RTE_ETH_FOREACH_VALID_DEV(pid)
874                 if (!strcmp(name, eth_dev_shared_data->data[pid].name)) {
875                         *port_id = pid;
876                         return 0;
877                 }
878
879         return -ENODEV;
880 }
881
/*
 * Normalize a driver return code: if the call failed and the device has
 * been hot-unplugged meanwhile, report -EIO instead of the driver error.
 */
static int
eth_err(uint16_t port_id, int ret)
{
	if (ret != 0 && rte_eth_dev_is_removed(port_id))
		return -EIO;

	return ret;
}
891
892 static void
893 eth_dev_rxq_release(struct rte_eth_dev *dev, uint16_t qid)
894 {
895         void **rxq = dev->data->rx_queues;
896
897         if (rxq[qid] == NULL)
898                 return;
899
900         if (dev->dev_ops->rx_queue_release != NULL)
901                 (*dev->dev_ops->rx_queue_release)(rxq[qid]);
902         rxq[qid] = NULL;
903 }
904
905 static void
906 eth_dev_txq_release(struct rte_eth_dev *dev, uint16_t qid)
907 {
908         void **txq = dev->data->tx_queues;
909
910         if (txq[qid] == NULL)
911                 return;
912
913         if (dev->dev_ops->tx_queue_release != NULL)
914                 (*dev->dev_ops->tx_queue_release)(txq[qid]);
915         txq[qid] = NULL;
916 }
917
/*
 * Resize the per-device Rx queue pointer array to hold nb_queues entries.
 *
 * Three cases: first-time allocation (array NULL, nb_queues > 0),
 * re-configuration (shrink or grow an existing array), and teardown
 * (nb_queues == 0 frees the array). Queues above the new count are
 * released through the PMD callback; newly added slots are zeroed.
 * dev->data->nb_rx_queues is updated on success.
 *
 * Returns 0 on success, -ENOMEM if the array cannot be (re)allocated.
 */
static int
eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_rx_queues;
	void **rxq;
	unsigned i;

	if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
				sizeof(dev->data->rx_queues[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->rx_queues == NULL) {
			dev->data->nb_rx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
		/* release queues that fall beyond the new count before
		 * shrinking the array underneath them */
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_rxq_release(dev, i);

		rxq = dev->data->rx_queues;
		rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		/* NOTE(review): on realloc failure the old array is still
		 * valid, but the queues released above stay released while
		 * nb_rx_queues is left unchanged — callers treat -ENOMEM as
		 * fatal for the configure operation. */
		if (rxq == NULL)
			return -(ENOMEM);
		if (nb_queues > old_nb_queues) {
			uint16_t new_qs = nb_queues - old_nb_queues;

			/* zero only the newly appended slots */
			memset(rxq + old_nb_queues, 0,
				sizeof(rxq[0]) * new_qs);
		}

		dev->data->rx_queues = rxq;

	} else if (dev->data->rx_queues != NULL && nb_queues == 0) {
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_rxq_release(dev, i);

		rte_free(dev->data->rx_queues);
		dev->data->rx_queues = NULL;
	}
	dev->data->nb_rx_queues = nb_queues;
	return 0;
}
961
962 static int
963 eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
964 {
965         uint16_t port_id;
966
967         if (rx_queue_id >= dev->data->nb_rx_queues) {
968                 port_id = dev->data->port_id;
969                 RTE_ETHDEV_LOG(ERR,
970                                "Invalid Rx queue_id=%u of device with port_id=%u\n",
971                                rx_queue_id, port_id);
972                 return -EINVAL;
973         }
974
975         if (dev->data->rx_queues[rx_queue_id] == NULL) {
976                 port_id = dev->data->port_id;
977                 RTE_ETHDEV_LOG(ERR,
978                                "Queue %u of device with port_id=%u has not been setup\n",
979                                rx_queue_id, port_id);
980                 return -EINVAL;
981         }
982
983         return 0;
984 }
985
986 static int
987 eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id)
988 {
989         uint16_t port_id;
990
991         if (tx_queue_id >= dev->data->nb_tx_queues) {
992                 port_id = dev->data->port_id;
993                 RTE_ETHDEV_LOG(ERR,
994                                "Invalid Tx queue_id=%u of device with port_id=%u\n",
995                                tx_queue_id, port_id);
996                 return -EINVAL;
997         }
998
999         if (dev->data->tx_queues[tx_queue_id] == NULL) {
1000                 port_id = dev->data->port_id;
1001                 RTE_ETHDEV_LOG(ERR,
1002                                "Queue %u of device with port_id=%u has not been setup\n",
1003                                tx_queue_id, port_id);
1004                 return -EINVAL;
1005         }
1006
1007         return 0;
1008 }
1009
/*
 * Start Rx queue rx_queue_id of an already-started port.
 *
 * Returns 0 on success or if the queue is already started (INFO log
 * only), -ENODEV for an invalid port, -EINVAL for a stopped port, a bad
 * queue id, or a hairpin queue, -ENOTSUP when the PMD lacks the
 * rx_queue_start callback, or the PMD's (possibly -EIO-mapped) error.
 */
int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	/* queues may only be started once the port itself is running */
	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before start any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

	/* hairpin queues are driven by the PMD, not by this API */
	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	/* already running: benign, report success */
	if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			rx_queue_id, port_id);
		return 0;
	}

	/* eth_err() converts driver failure on a removed device to -EIO */
	return eth_err(port_id, dev->dev_ops->rx_queue_start(dev, rx_queue_id));
}
1048
/*
 * Stop Rx queue rx_queue_id of a port.
 *
 * Returns 0 on success or if the queue is already stopped (INFO log
 * only), -ENODEV for an invalid port, -EINVAL for a bad queue id or a
 * hairpin queue, -ENOTSUP when the PMD lacks the rx_queue_stop
 * callback, or the PMD's (possibly -EIO-mapped) error.
 */
int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

	/* hairpin queues are driven by the PMD, not by this API */
	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	/* already stopped: benign, report success */
	if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			rx_queue_id, port_id);
		return 0;
	}

	/* eth_err() converts driver failure on a removed device to -EIO */
	return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
}
1080
/*
 * Start Tx queue tx_queue_id of an already-started port.
 *
 * Returns 0 on success or if the queue is already started (INFO log
 * only), -ENODEV for an invalid port, -EINVAL for a stopped port, a bad
 * queue id, or a hairpin queue, -ENOTSUP when the PMD lacks the
 * tx_queue_start callback, or the PMD's (possibly -EIO-mapped) error.
 */
int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	/* queues may only be started once the port itself is running */
	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before start any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

	/* hairpin queues are driven by the PMD, not by this API */
	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	/* already running: benign, report success */
	if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			tx_queue_id, port_id);
		return 0;
	}

	/* eth_err() converts driver failure on a removed device to -EIO */
	return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
}
1119
/*
 * Stop Tx queue tx_queue_id of a port.
 *
 * Returns 0 on success or if the queue is already stopped (INFO log
 * only), -ENODEV for an invalid port, -EINVAL for a bad queue id or a
 * hairpin queue, -ENOTSUP when the PMD lacks the tx_queue_stop
 * callback, or the PMD's (possibly -EIO-mapped) error.
 */
int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

	/* hairpin queues are driven by the PMD, not by this API */
	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	/* already stopped: benign, report success */
	if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			tx_queue_id, port_id);
		return 0;
	}

	/* eth_err() converts driver failure on a removed device to -EIO */
	return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
}
1151
/*
 * Resize the per-device Tx queue pointer array to hold nb_queues entries.
 *
 * Mirrors eth_dev_rx_queue_config(): first-time allocation, shrink/grow
 * re-configuration (releasing queues above the new count and zeroing
 * newly appended slots), or full teardown when nb_queues == 0.
 * dev->data->nb_tx_queues is updated on success.
 *
 * Returns 0 on success, -ENOMEM if the array cannot be (re)allocated.
 */
static int
eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_tx_queues;
	void **txq;
	unsigned i;

	if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
						   sizeof(dev->data->tx_queues[0]) * nb_queues,
						   RTE_CACHE_LINE_SIZE);
		if (dev->data->tx_queues == NULL) {
			dev->data->nb_tx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
		/* release queues that fall beyond the new count before
		 * shrinking the array underneath them */
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_txq_release(dev, i);

		txq = dev->data->tx_queues;
		txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
				  RTE_CACHE_LINE_SIZE);
		/* NOTE(review): on realloc failure the old array is still
		 * valid, but the queues released above stay released while
		 * nb_tx_queues is left unchanged — callers treat -ENOMEM as
		 * fatal for the configure operation. */
		if (txq == NULL)
			return -ENOMEM;
		if (nb_queues > old_nb_queues) {
			uint16_t new_qs = nb_queues - old_nb_queues;

			/* zero only the newly appended slots */
			memset(txq + old_nb_queues, 0,
			       sizeof(txq[0]) * new_qs);
		}

		dev->data->tx_queues = txq;

	} else if (dev->data->tx_queues != NULL && nb_queues == 0) {
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_txq_release(dev, i);

		rte_free(dev->data->tx_queues);
		dev->data->tx_queues = NULL;
	}
	dev->data->nb_tx_queues = nb_queues;
	return 0;
}
1195
1196 uint32_t
1197 rte_eth_speed_bitflag(uint32_t speed, int duplex)
1198 {
1199         switch (speed) {
1200         case ETH_SPEED_NUM_10M:
1201                 return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
1202         case ETH_SPEED_NUM_100M:
1203                 return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
1204         case ETH_SPEED_NUM_1G:
1205                 return ETH_LINK_SPEED_1G;
1206         case ETH_SPEED_NUM_2_5G:
1207                 return ETH_LINK_SPEED_2_5G;
1208         case ETH_SPEED_NUM_5G:
1209                 return ETH_LINK_SPEED_5G;
1210         case ETH_SPEED_NUM_10G:
1211                 return ETH_LINK_SPEED_10G;
1212         case ETH_SPEED_NUM_20G:
1213                 return ETH_LINK_SPEED_20G;
1214         case ETH_SPEED_NUM_25G:
1215                 return ETH_LINK_SPEED_25G;
1216         case ETH_SPEED_NUM_40G:
1217                 return ETH_LINK_SPEED_40G;
1218         case ETH_SPEED_NUM_50G:
1219                 return ETH_LINK_SPEED_50G;
1220         case ETH_SPEED_NUM_56G:
1221                 return ETH_LINK_SPEED_56G;
1222         case ETH_SPEED_NUM_100G:
1223                 return ETH_LINK_SPEED_100G;
1224         case ETH_SPEED_NUM_200G:
1225                 return ETH_LINK_SPEED_200G;
1226         default:
1227                 return 0;
1228         }
1229 }
1230
1231 const char *
1232 rte_eth_dev_rx_offload_name(uint64_t offload)
1233 {
1234         const char *name = "UNKNOWN";
1235         unsigned int i;
1236
1237         for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i) {
1238                 if (offload == eth_dev_rx_offload_names[i].offload) {
1239                         name = eth_dev_rx_offload_names[i].name;
1240                         break;
1241                 }
1242         }
1243
1244         return name;
1245 }
1246
1247 const char *
1248 rte_eth_dev_tx_offload_name(uint64_t offload)
1249 {
1250         const char *name = "UNKNOWN";
1251         unsigned int i;
1252
1253         for (i = 0; i < RTE_DIM(eth_dev_tx_offload_names); ++i) {
1254                 if (offload == eth_dev_tx_offload_names[i].offload) {
1255                         name = eth_dev_tx_offload_names[i].name;
1256                         break;
1257                 }
1258         }
1259
1260         return name;
1261 }
1262
1263 static inline int
1264 eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
1265                    uint32_t max_rx_pkt_len, uint32_t dev_info_size)
1266 {
1267         int ret = 0;
1268
1269         if (dev_info_size == 0) {
1270                 if (config_size != max_rx_pkt_len) {
1271                         RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
1272                                        " %u != %u is not allowed\n",
1273                                        port_id, config_size, max_rx_pkt_len);
1274                         ret = -EINVAL;
1275                 }
1276         } else if (config_size > dev_info_size) {
1277                 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
1278                                "> max allowed value %u\n", port_id, config_size,
1279                                dev_info_size);
1280                 ret = -EINVAL;
1281         } else if (config_size < RTE_ETHER_MIN_LEN) {
1282                 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
1283                                "< min allowed value %u\n", port_id, config_size,
1284                                (unsigned int)RTE_ETHER_MIN_LEN);
1285                 ret = -EINVAL;
1286         }
1287         return ret;
1288 }
1289
1290 /*
1291  * Validate offloads that are requested through rte_eth_dev_configure against
1292  * the offloads successfully set by the ethernet device.
1293  *
1294  * @param port_id
1295  *   The port identifier of the Ethernet device.
1296  * @param req_offloads
1297  *   The offloads that have been requested through `rte_eth_dev_configure`.
1298  * @param set_offloads
1299  *   The offloads successfully set by the ethernet device.
1300  * @param offload_type
1301  *   The offload type i.e. Rx/Tx string.
1302  * @param offload_name
1303  *   The function that prints the offload name.
1304  * @return
1305  *   - (0) if validation successful.
1306  *   - (-EINVAL) if requested offload has been silently disabled.
1307  *
1308  */
1309 static int
1310 eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads,
1311                   uint64_t set_offloads, const char *offload_type,
1312                   const char *(*offload_name)(uint64_t))
1313 {
1314         uint64_t offloads_diff = req_offloads ^ set_offloads;
1315         uint64_t offload;
1316         int ret = 0;
1317
1318         while (offloads_diff != 0) {
1319                 /* Check if any offload is requested but not enabled. */
1320                 offload = 1ULL << __builtin_ctzll(offloads_diff);
1321                 if (offload & req_offloads) {
1322                         RTE_ETHDEV_LOG(ERR,
1323                                 "Port %u failed to enable %s offload %s\n",
1324                                 port_id, offload_type, offload_name(offload));
1325                         ret = -EINVAL;
1326                 }
1327
1328                 /* Check if offload couldn't be disabled. */
1329                 if (offload & set_offloads) {
1330                         RTE_ETHDEV_LOG(DEBUG,
1331                                 "Port %u %s offload %s is not requested but enabled\n",
1332                                 port_id, offload_type, offload_name(offload));
1333                 }
1334
1335                 offloads_diff &= ~offload;
1336         }
1337
1338         return ret;
1339 }
1340
/*
 * Configure an Ethernet device: validate the requested configuration
 * against the device capabilities reported by rte_eth_dev_info_get(),
 * allocate the Rx/Tx queue pointer arrays, and invoke the PMD's
 * dev_configure callback. On any failure the queue arrays are freed and
 * the previously stored configuration and MTU are restored, so a failed
 * call leaves the device as it was.
 *
 * Returns 0 on success; -ENODEV, -EINVAL, -EBUSY, -ENOTSUP, -ENOMEM or a
 * PMD error (possibly mapped to -EIO by eth_err()) on failure.
 */
int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf orig_conf;
	uint16_t overhead_len;
	int diag;
	int ret;
	uint16_t old_mtu;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev_conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot configure ethdev port %u from NULL config\n",
			port_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be stopped to allow configuration\n",
			port_id);
		return -EBUSY;
	}

	/*
	 * Ensure that "dev_configured" is always 0 each time prepare to do
	 * dev_configure() to avoid any non-anticipated behaviour.
	 * And set to 1 when dev_configure() is executed successfully.
	 */
	dev->data->dev_configured = 0;

	 /* Store original config, as rollback required on failure */
	memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));

	/*
	 * Copy the dev_conf parameter into the dev structure.
	 * rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get
	 */
	if (dev_conf != &dev->data->dev_conf)
		memcpy(&dev->data->dev_conf, dev_conf,
		       sizeof(dev->data->dev_conf));

	/* Backup mtu for rollback */
	old_mtu = dev->data->mtu;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		goto rollback;

	/* Get the real Ethernet overhead length */
	if (dev_info.max_mtu != UINT16_MAX &&
	    dev_info.max_rx_pktlen > dev_info.max_mtu)
		overhead_len = dev_info.max_rx_pktlen - dev_info.max_mtu;
	else
		overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	/* If number of queues specified by application for both Rx and Tx is
	 * zero, use driver preferred values. This cannot be done individually
	 * as it is valid for either Tx or Rx (but not both) to be zero.
	 * If driver does not provide any preferred valued, fall back on
	 * EAL defaults.
	 */
	if (nb_rx_q == 0 && nb_tx_q == 0) {
		nb_rx_q = dev_info.default_rxportconf.nb_queues;
		if (nb_rx_q == 0)
			nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
		nb_tx_q = dev_info.default_txportconf.nb_queues;
		if (nb_tx_q == 0)
			nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
	}

	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of RX queues requested (%u) is greater than max supported(%d)\n",
			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of TX queues requested (%u) is greater than max supported(%d)\n",
			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Check that the numbers of RX and TX queues are not greater
	 * than the maximum number of RX and TX queues supported by the
	 * configured device.
	 */
	if (nb_rx_q > dev_info.max_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
			port_id, nb_rx_q, dev_info.max_rx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
			port_id, nb_tx_q, dev_info.max_tx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check that the device supports requested interrupts */
	if ((dev_conf->intr_conf.lsc == 1) &&
			(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->intr_conf.rmv == 1) &&
			(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * If jumbo frames are enabled, check that the maximum RX packet
	 * length is supported by the configured device.
	 */
	if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) {
			RTE_ETHDEV_LOG(ERR,
				"Ethdev port_id=%u max_rx_pkt_len %u > max valid value %u\n",
				port_id, dev_conf->rxmode.max_rx_pkt_len,
				dev_info.max_rx_pktlen);
			ret = -EINVAL;
			goto rollback;
		} else if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN) {
			RTE_ETHDEV_LOG(ERR,
				"Ethdev port_id=%u max_rx_pkt_len %u < min valid value %u\n",
				port_id, dev_conf->rxmode.max_rx_pkt_len,
				(unsigned int)RTE_ETHER_MIN_LEN);
			ret = -EINVAL;
			goto rollback;
		}

		/* Scale the MTU size to adapt max_rx_pkt_len */
		dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
				overhead_len;
	} else {
		uint16_t pktlen = dev_conf->rxmode.max_rx_pkt_len;
		/* out-of-range non-jumbo pktlen is silently replaced by the
		 * standard Ethernet default rather than rejected */
		if (pktlen < RTE_ETHER_MIN_MTU + overhead_len ||
		    pktlen > RTE_ETHER_MTU + overhead_len)
			/* Use default value */
			dev->data->dev_conf.rxmode.max_rx_pkt_len =
						RTE_ETHER_MTU + overhead_len;
	}

	/*
	 * If LRO is enabled, check that the maximum aggregated packet
	 * size is supported by the configured device.
	 */
	if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
		/* max_lro_pkt_size == 0 means "default to max_rx_pkt_len" */
		if (dev_conf->rxmode.max_lro_pkt_size == 0)
			dev->data->dev_conf.rxmode.max_lro_pkt_size =
				dev->data->dev_conf.rxmode.max_rx_pkt_len;
		ret = eth_dev_check_lro_pkt_size(port_id,
				dev->data->dev_conf.rxmode.max_lro_pkt_size,
				dev->data->dev_conf.rxmode.max_rx_pkt_len,
				dev_info.max_lro_pkt_size);
		if (ret != 0)
			goto rollback;
	}

	/* Any requested offloading must be within its device capabilities */
	if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
	     dev_conf->rxmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, dev_conf->rxmode.offloads,
			dev_info.rx_offload_capa,
			__func__);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
	     dev_conf->txmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, dev_conf->txmode.offloads,
			dev_info.tx_offload_capa,
			__func__);
		ret = -EINVAL;
		goto rollback;
	}

	dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
		rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);

	/* Check that device supports requested rss hash functions. */
	if ((dev_info.flow_type_rss_offloads |
	     dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
	    dev_info.flow_type_rss_offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
			port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
			dev_info.flow_type_rss_offloads);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
	if (((dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) == 0) &&
	    (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
			port_id,
			rte_eth_dev_rx_offload_name(DEV_RX_OFFLOAD_RSS_HASH));
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Setup new number of RX/TX queues and reconfigure device.
	 */
	diag = eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u eth_dev_rx_queue_config = %d\n",
			port_id, diag);
		ret = diag;
		goto rollback;
	}

	diag = eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u eth_dev_tx_queue_config = %d\n",
			port_id, diag);
		/* undo the Rx allocation done just above */
		eth_dev_rx_queue_config(dev, 0);
		ret = diag;
		goto rollback;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Initialize Rx profiling if enabled at compilation time. */
	diag = __rte_eth_dev_profile_init(port_id, dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Validate Rx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->rxmode.offloads,
			dev->data->dev_conf.rxmode.offloads, "Rx",
			rte_eth_dev_rx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	/* Validate Tx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->txmode.offloads,
			dev->data->dev_conf.txmode.offloads, "Tx",
			rte_eth_dev_tx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	dev->data->dev_configured = 1;
	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0);
	return 0;
reset_queues:
	/* free both queue arrays allocated above, then roll back */
	eth_dev_rx_queue_config(dev, 0);
	eth_dev_tx_queue_config(dev, 0);
rollback:
	/* restore the configuration and MTU saved before any change */
	memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
	if (old_mtu != dev->data->mtu)
		dev->data->mtu = old_mtu;

	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret);
	return ret;
}
1642
1643 void
1644 rte_eth_dev_internal_reset(struct rte_eth_dev *dev)
1645 {
1646         if (dev->data->dev_started) {
1647                 RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
1648                         dev->data->port_id);
1649                 return;
1650         }
1651
1652         eth_dev_rx_queue_config(dev, 0);
1653         eth_dev_tx_queue_config(dev, 0);
1654
1655         memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
1656 }
1657
1658 static void
1659 eth_dev_mac_restore(struct rte_eth_dev *dev,
1660                         struct rte_eth_dev_info *dev_info)
1661 {
1662         struct rte_ether_addr *addr;
1663         uint16_t i;
1664         uint32_t pool = 0;
1665         uint64_t pool_mask;
1666
1667         /* replay MAC address configuration including default MAC */
1668         addr = &dev->data->mac_addrs[0];
1669         if (*dev->dev_ops->mac_addr_set != NULL)
1670                 (*dev->dev_ops->mac_addr_set)(dev, addr);
1671         else if (*dev->dev_ops->mac_addr_add != NULL)
1672                 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);
1673
1674         if (*dev->dev_ops->mac_addr_add != NULL) {
1675                 for (i = 1; i < dev_info->max_mac_addrs; i++) {
1676                         addr = &dev->data->mac_addrs[i];
1677
1678                         /* skip zero address */
1679                         if (rte_is_zero_ether_addr(addr))
1680                                 continue;
1681
1682                         pool = 0;
1683                         pool_mask = dev->data->mac_pool_sel[i];
1684
1685                         do {
1686                                 if (pool_mask & 1ULL)
1687                                         (*dev->dev_ops->mac_addr_add)(dev,
1688                                                 addr, i, pool);
1689                                 pool_mask >>= 1;
1690                                 pool++;
1691                         } while (pool_mask);
1692                 }
1693         }
1694 }
1695
/*
 * Replay the configuration cached in dev->data into the PMD after the
 * device has been started: MAC addresses, promiscuous mode and
 * allmulticast mode.
 *
 * Returns 0 on success, or the first negative errno returned by a PMD
 * callback. A PMD returning -ENOTSUP is tolerated and the replay
 * continues.
 */
static int
eth_dev_config_restore(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info, uint16_t port_id)
{
	int ret;

	/*
	 * For RTE_ETH_DEV_NOLIVE_MAC_ADDR devices the MACs were already
	 * restored by rte_eth_dev_start() before the port was started.
	 */
	if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
		eth_dev_mac_restore(dev, dev_info);

	/* replay promiscuous configuration */
	/*
	 * use callbacks directly since we don't need port_id check and
	 * would like to bypass the same value set
	 */
	if (rte_eth_promiscuous_get(port_id) == 1 &&
	    *dev->dev_ops->promiscuous_enable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->promiscuous_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to enable promiscuous mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_promiscuous_get(port_id) == 0 &&
		   *dev->dev_ops->promiscuous_disable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->promiscuous_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to disable promiscuous mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	/* replay all multicast configuration */
	/*
	 * use callbacks directly since we don't need port_id check and
	 * would like to bypass the same value set
	 */
	if (rte_eth_allmulticast_get(port_id) == 1 &&
	    *dev->dev_ops->allmulticast_enable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->allmulticast_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to enable allmulticast mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_allmulticast_get(port_id) == 0 &&
		   *dev->dev_ops->allmulticast_disable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->allmulticast_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to disable allmulticast mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	return 0;
}
1761
/*
 * Start a configured Ethernet device.
 *
 * Sequence: validate port and state, query device info, restore MACs
 * early for NOLIVE_MAC_ADDR devices, call the PMD start callback, then
 * replay the cached configuration. On replay failure the port is
 * stopped again and the replay error is returned. Starting an
 * already-started port is a successful no-op.
 */
int
rte_eth_dev_start(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	int diag;
	int ret, ret_stop;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	/* rte_eth_dev_configure() must have succeeded first */
	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.\n",
			port_id);
		return -EINVAL;
	}

	if (dev->data->dev_started != 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" already started\n",
			port_id);
		return 0;
	}

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Lets restore MAC now if device does not support live change */
	if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
		eth_dev_mac_restore(dev, &dev_info);

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return eth_err(port_id, diag);

	/* replay promiscuous/allmulticast/MAC settings into the PMD */
	ret = eth_dev_config_restore(dev, &dev_info, port_id);
	if (ret != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Error during restoring configuration for device (port %u): %s\n",
			port_id, rte_strerror(-ret));
		/* roll back: best-effort stop, but report the restore error */
		ret_stop = rte_eth_dev_stop(port_id);
		if (ret_stop != 0) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to stop device (port %u): %s\n",
				port_id, rte_strerror(-ret_stop));
		}

		return ret;
	}

	/* without link-state interrupts, refresh the link status once */
	if (dev->data->dev_conf.intr_conf.lsc == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
		(*dev->dev_ops->link_update)(dev, 0);
	}

	rte_ethdev_trace_start(port_id);
	return 0;
}
1826
1827 int
1828 rte_eth_dev_stop(uint16_t port_id)
1829 {
1830         struct rte_eth_dev *dev;
1831         int ret;
1832
1833         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1834         dev = &rte_eth_devices[port_id];
1835
1836         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_stop, -ENOTSUP);
1837
1838         if (dev->data->dev_started == 0) {
1839                 RTE_ETHDEV_LOG(INFO,
1840                         "Device with port_id=%"PRIu16" already stopped\n",
1841                         port_id);
1842                 return 0;
1843         }
1844
1845         dev->data->dev_started = 0;
1846         ret = (*dev->dev_ops->dev_stop)(dev);
1847         rte_ethdev_trace_stop(port_id, ret);
1848
1849         return ret;
1850 }
1851
1852 int
1853 rte_eth_dev_set_link_up(uint16_t port_id)
1854 {
1855         struct rte_eth_dev *dev;
1856
1857         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1858         dev = &rte_eth_devices[port_id];
1859
1860         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1861         return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
1862 }
1863
1864 int
1865 rte_eth_dev_set_link_down(uint16_t port_id)
1866 {
1867         struct rte_eth_dev *dev;
1868
1869         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1870         dev = &rte_eth_devices[port_id];
1871
1872         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1873         return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
1874 }
1875
1876 int
1877 rte_eth_dev_close(uint16_t port_id)
1878 {
1879         struct rte_eth_dev *dev;
1880         int firsterr, binerr;
1881         int *lasterr = &firsterr;
1882
1883         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1884         dev = &rte_eth_devices[port_id];
1885
1886         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
1887         *lasterr = (*dev->dev_ops->dev_close)(dev);
1888         if (*lasterr != 0)
1889                 lasterr = &binerr;
1890
1891         rte_ethdev_trace_close(port_id);
1892         *lasterr = rte_eth_dev_release_port(dev);
1893
1894         return firsterr;
1895 }
1896
1897 int
1898 rte_eth_dev_reset(uint16_t port_id)
1899 {
1900         struct rte_eth_dev *dev;
1901         int ret;
1902
1903         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1904         dev = &rte_eth_devices[port_id];
1905
1906         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
1907
1908         ret = rte_eth_dev_stop(port_id);
1909         if (ret != 0) {
1910                 RTE_ETHDEV_LOG(ERR,
1911                         "Failed to stop device (port %u) before reset: %s - ignore\n",
1912                         port_id, rte_strerror(-ret));
1913         }
1914         ret = dev->dev_ops->dev_reset(dev);
1915
1916         return eth_err(port_id, ret);
1917 }
1918
1919 int
1920 rte_eth_dev_is_removed(uint16_t port_id)
1921 {
1922         struct rte_eth_dev *dev;
1923         int ret;
1924
1925         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
1926         dev = &rte_eth_devices[port_id];
1927
1928         if (dev->state == RTE_ETH_DEV_REMOVED)
1929                 return 1;
1930
1931         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);
1932
1933         ret = dev->dev_ops->is_removed(dev);
1934         if (ret != 0)
1935                 /* Device is physically removed. */
1936                 dev->state = RTE_ETH_DEV_REMOVED;
1937
1938         return ret;
1939 }
1940
/*
 * Validate a buffer-split (multi-segment Rx) configuration against the
 * device capabilities and fill *mbp_buf_size with the data room size of
 * the last examined mempool.
 *
 * Checks, per segment: non-NULL mempool, multi-pool support, offset
 * support and alignment, mempool private data size, and that the data
 * room can hold the requested length plus offset (mbuf headroom is only
 * reserved in the first segment).
 *
 * Returns 0 on success, -EINVAL/-ENOTSUP/-ENOSPC on the first violation.
 */
static int
rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg,
			     uint16_t n_seg, uint32_t *mbp_buf_size,
			     const struct rte_eth_dev_info *dev_info)
{
	const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa;
	struct rte_mempool *mp_first;
	uint32_t offset_mask;
	uint16_t seg_idx;

	if (n_seg > seg_capa->max_nseg) {
		RTE_ETHDEV_LOG(ERR,
			       "Requested Rx segments %u exceed supported %u\n",
			       n_seg, seg_capa->max_nseg);
		return -EINVAL;
	}
	/*
	 * Check the sizes and offsets against buffer sizes
	 * for each segment specified in extended configuration.
	 */
	mp_first = rx_seg[0].mp;
	/* low bits that must be clear in every segment offset */
	offset_mask = (1u << seg_capa->offset_align_log2) - 1;
	for (seg_idx = 0; seg_idx < n_seg; seg_idx++) {
		struct rte_mempool *mpl = rx_seg[seg_idx].mp;
		uint32_t length = rx_seg[seg_idx].length;
		uint32_t offset = rx_seg[seg_idx].offset;

		if (mpl == NULL) {
			RTE_ETHDEV_LOG(ERR, "null mempool pointer\n");
			return -EINVAL;
		}
		/* all segments must share one pool unless the PMD allows more */
		if (seg_idx != 0 && mp_first != mpl &&
		    seg_capa->multi_pools == 0) {
			RTE_ETHDEV_LOG(ERR, "Receiving to multiple pools is not supported\n");
			return -ENOTSUP;
		}
		if (offset != 0) {
			if (seg_capa->offset_allowed == 0) {
				RTE_ETHDEV_LOG(ERR, "Rx segmentation with offset is not supported\n");
				return -ENOTSUP;
			}
			if (offset & offset_mask) {
				RTE_ETHDEV_LOG(ERR, "Rx segmentation invalid offset alignment %u, %u\n",
					       offset,
					       seg_capa->offset_align_log2);
				return -EINVAL;
			}
		}
		if (mpl->private_data_size <
			sizeof(struct rte_pktmbuf_pool_private)) {
			RTE_ETHDEV_LOG(ERR,
				       "%s private_data_size %u < %u\n",
				       mpl->name, mpl->private_data_size,
				       (unsigned int)sizeof
					(struct rte_pktmbuf_pool_private));
			return -ENOSPC;
		}
		/* headroom is accounted for in the first segment only */
		offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM;
		*mbp_buf_size = rte_pktmbuf_data_room_size(mpl);
		/* zero length means "use the whole data room" */
		length = length != 0 ? length : *mbp_buf_size;
		if (*mbp_buf_size < length + offset) {
			RTE_ETHDEV_LOG(ERR,
				       "%s mbuf_data_room_size %u < %u (segment length=%u + segment offset=%u)\n",
				       mpl->name, *mbp_buf_size,
				       length + offset, length, offset);
			return -EINVAL;
		}
	}
	return 0;
}
2011
2012 int
2013 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2014                        uint16_t nb_rx_desc, unsigned int socket_id,
2015                        const struct rte_eth_rxconf *rx_conf,
2016                        struct rte_mempool *mp)
2017 {
2018         int ret;
2019         uint32_t mbp_buf_size;
2020         struct rte_eth_dev *dev;
2021         struct rte_eth_dev_info dev_info;
2022         struct rte_eth_rxconf local_conf;
2023
2024         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2025         dev = &rte_eth_devices[port_id];
2026
2027         if (rx_queue_id >= dev->data->nb_rx_queues) {
2028                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
2029                 return -EINVAL;
2030         }
2031
2032         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
2033
2034         ret = rte_eth_dev_info_get(port_id, &dev_info);
2035         if (ret != 0)
2036                 return ret;
2037
2038         if (mp != NULL) {
2039                 /* Single pool configuration check. */
2040                 if (rx_conf != NULL && rx_conf->rx_nseg != 0) {
2041                         RTE_ETHDEV_LOG(ERR,
2042                                        "Ambiguous segment configuration\n");
2043                         return -EINVAL;
2044                 }
2045                 /*
2046                  * Check the size of the mbuf data buffer, this value
2047                  * must be provided in the private data of the memory pool.
2048                  * First check that the memory pool(s) has a valid private data.
2049                  */
2050                 if (mp->private_data_size <
2051                                 sizeof(struct rte_pktmbuf_pool_private)) {
2052                         RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n",
2053                                 mp->name, mp->private_data_size,
2054                                 (unsigned int)
2055                                 sizeof(struct rte_pktmbuf_pool_private));
2056                         return -ENOSPC;
2057                 }
2058                 mbp_buf_size = rte_pktmbuf_data_room_size(mp);
2059                 if (mbp_buf_size < dev_info.min_rx_bufsize +
2060                                    RTE_PKTMBUF_HEADROOM) {
2061                         RTE_ETHDEV_LOG(ERR,
2062                                        "%s mbuf_data_room_size %u < %u (RTE_PKTMBUF_HEADROOM=%u + min_rx_bufsize(dev)=%u)\n",
2063                                        mp->name, mbp_buf_size,
2064                                        RTE_PKTMBUF_HEADROOM +
2065                                        dev_info.min_rx_bufsize,
2066                                        RTE_PKTMBUF_HEADROOM,
2067                                        dev_info.min_rx_bufsize);
2068                         return -EINVAL;
2069                 }
2070         } else {
2071                 const struct rte_eth_rxseg_split *rx_seg;
2072                 uint16_t n_seg;
2073
2074                 /* Extended multi-segment configuration check. */
2075                 if (rx_conf == NULL || rx_conf->rx_seg == NULL || rx_conf->rx_nseg == 0) {
2076                         RTE_ETHDEV_LOG(ERR,
2077                                        "Memory pool is null and no extended configuration provided\n");
2078                         return -EINVAL;
2079                 }
2080
2081                 rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg;
2082                 n_seg = rx_conf->rx_nseg;
2083
2084                 if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
2085                         ret = rte_eth_rx_queue_check_split(rx_seg, n_seg,
2086                                                            &mbp_buf_size,
2087                                                            &dev_info);
2088                         if (ret != 0)
2089                                 return ret;
2090                 } else {
2091                         RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n");
2092                         return -EINVAL;
2093                 }
2094         }
2095
2096         /* Use default specified by driver, if nb_rx_desc is zero */
2097         if (nb_rx_desc == 0) {
2098                 nb_rx_desc = dev_info.default_rxportconf.ring_size;
2099                 /* If driver default is also zero, fall back on EAL default */
2100                 if (nb_rx_desc == 0)
2101                         nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
2102         }
2103
2104         if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
2105                         nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
2106                         nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
2107
2108                 RTE_ETHDEV_LOG(ERR,
2109                         "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n",
2110                         nb_rx_desc, dev_info.rx_desc_lim.nb_max,
2111                         dev_info.rx_desc_lim.nb_min,
2112                         dev_info.rx_desc_lim.nb_align);
2113                 return -EINVAL;
2114         }
2115
2116         if (dev->data->dev_started &&
2117                 !(dev_info.dev_capa &
2118                         RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
2119                 return -EBUSY;
2120
2121         if (dev->data->dev_started &&
2122                 (dev->data->rx_queue_state[rx_queue_id] !=
2123                         RTE_ETH_QUEUE_STATE_STOPPED))
2124                 return -EBUSY;
2125
2126         eth_dev_rxq_release(dev, rx_queue_id);
2127
2128         if (rx_conf == NULL)
2129                 rx_conf = &dev_info.default_rxconf;
2130
2131         local_conf = *rx_conf;
2132
2133         /*
2134          * If an offloading has already been enabled in
2135          * rte_eth_dev_configure(), it has been enabled on all queues,
2136          * so there is no need to enable it in this queue again.
2137          * The local_conf.offloads input to underlying PMD only carries
2138          * those offloadings which are only enabled on this queue and
2139          * not enabled on all queues.
2140          */
2141         local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
2142
2143         /*
2144          * New added offloadings for this queue are those not enabled in
2145          * rte_eth_dev_configure() and they must be per-queue type.
2146          * A pure per-port offloading can't be enabled on a queue while
2147          * disabled on another queue. A pure per-port offloading can't
2148          * be enabled for any queue as new added one if it hasn't been
2149          * enabled in rte_eth_dev_configure().
2150          */
2151         if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
2152              local_conf.offloads) {
2153                 RTE_ETHDEV_LOG(ERR,
2154                         "Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
2155                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2156                         port_id, rx_queue_id, local_conf.offloads,
2157                         dev_info.rx_queue_offload_capa,
2158                         __func__);
2159                 return -EINVAL;
2160         }
2161
2162         /*
2163          * If LRO is enabled, check that the maximum aggregated packet
2164          * size is supported by the configured device.
2165          */
2166         if (local_conf.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
2167                 if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0)
2168                         dev->data->dev_conf.rxmode.max_lro_pkt_size =
2169                                 dev->data->dev_conf.rxmode.max_rx_pkt_len;
2170                 int ret = eth_dev_check_lro_pkt_size(port_id,
2171                                 dev->data->dev_conf.rxmode.max_lro_pkt_size,
2172                                 dev->data->dev_conf.rxmode.max_rx_pkt_len,
2173                                 dev_info.max_lro_pkt_size);
2174                 if (ret != 0)
2175                         return ret;
2176         }
2177
2178         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
2179                                               socket_id, &local_conf, mp);
2180         if (!ret) {
2181                 if (!dev->data->min_rx_buf_size ||
2182                     dev->data->min_rx_buf_size > mbp_buf_size)
2183                         dev->data->min_rx_buf_size = mbp_buf_size;
2184         }
2185
2186         rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp,
2187                 rx_conf, ret);
2188         return eth_err(port_id, ret);
2189 }
2190
2191 int
2192 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2193                                uint16_t nb_rx_desc,
2194                                const struct rte_eth_hairpin_conf *conf)
2195 {
2196         int ret;
2197         struct rte_eth_dev *dev;
2198         struct rte_eth_hairpin_cap cap;
2199         int i;
2200         int count;
2201
2202         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2203         dev = &rte_eth_devices[port_id];
2204
2205         if (rx_queue_id >= dev->data->nb_rx_queues) {
2206                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
2207                 return -EINVAL;
2208         }
2209
2210         if (conf == NULL) {
2211                 RTE_ETHDEV_LOG(ERR,
2212                         "Cannot setup ethdev port %u Rx hairpin queue from NULL config\n",
2213                         port_id);
2214                 return -EINVAL;
2215         }
2216
2217         ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2218         if (ret != 0)
2219                 return ret;
2220         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_hairpin_queue_setup,
2221                                 -ENOTSUP);
2222         /* if nb_rx_desc is zero use max number of desc from the driver. */
2223         if (nb_rx_desc == 0)
2224                 nb_rx_desc = cap.max_nb_desc;
2225         if (nb_rx_desc > cap.max_nb_desc) {
2226                 RTE_ETHDEV_LOG(ERR,
2227                         "Invalid value for nb_rx_desc(=%hu), should be: <= %hu",
2228                         nb_rx_desc, cap.max_nb_desc);
2229                 return -EINVAL;
2230         }
2231         if (conf->peer_count > cap.max_rx_2_tx) {
2232                 RTE_ETHDEV_LOG(ERR,
2233                         "Invalid value for number of peers for Rx queue(=%u), should be: <= %hu",
2234                         conf->peer_count, cap.max_rx_2_tx);
2235                 return -EINVAL;
2236         }
2237         if (conf->peer_count == 0) {
2238                 RTE_ETHDEV_LOG(ERR,
2239                         "Invalid value for number of peers for Rx queue(=%u), should be: > 0",
2240                         conf->peer_count);
2241                 return -EINVAL;
2242         }
2243         for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
2244              cap.max_nb_queues != UINT16_MAX; i++) {
2245                 if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
2246                         count++;
2247         }
2248         if (count > cap.max_nb_queues) {
2249                 RTE_ETHDEV_LOG(ERR, "To many Rx hairpin queues max is %d",
2250                 cap.max_nb_queues);
2251                 return -EINVAL;
2252         }
2253         if (dev->data->dev_started)
2254                 return -EBUSY;
2255         eth_dev_rxq_release(dev, rx_queue_id);
2256         ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
2257                                                       nb_rx_desc, conf);
2258         if (ret == 0)
2259                 dev->data->rx_queue_state[rx_queue_id] =
2260                         RTE_ETH_QUEUE_STATE_HAIRPIN;
2261         return eth_err(port_id, ret);
2262 }
2263
/*
 * Configure a Tx queue on a port.
 *
 * Validates the queue id and descriptor count against device limits,
 * checks that any per-queue offloads are within the device's per-queue
 * capabilities, then calls the PMD tx_queue_setup callback. Runtime
 * setup on a started port requires the corresponding device capability
 * and a stopped queue.
 */
int
rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
		       uint16_t nb_tx_desc, unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf local_conf;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Use default specified by driver, if nb_tx_desc is zero */
	if (nb_tx_desc == 0) {
		nb_tx_desc = dev_info.default_txportconf.ring_size;
		/* If driver default is zero, fall back on EAL default */
		if (nb_tx_desc == 0)
			nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
	}
	if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
	    nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
	    nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n",
			nb_tx_desc, dev_info.tx_desc_lim.nb_max,
			dev_info.tx_desc_lim.nb_min,
			dev_info.tx_desc_lim.nb_align);
		return -EINVAL;
	}

	/* runtime queue setup requires an explicit device capability */
	if (dev->data->dev_started &&
		!(dev_info.dev_capa &
			RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
		return -EBUSY;

	if (dev->data->dev_started &&
		(dev->data->tx_queue_state[tx_queue_id] !=
			RTE_ETH_QUEUE_STATE_STOPPED))
		return -EBUSY;

	eth_dev_txq_release(dev, tx_queue_id);

	if (tx_conf == NULL)
		tx_conf = &dev_info.default_txconf;

	local_conf = *tx_conf;

	/*
	 * If an offloading has already been enabled in
	 * rte_eth_dev_configure(), it has been enabled on all queues,
	 * so there is no need to enable it in this queue again.
	 * The local_conf.offloads input to underlying PMD only carries
	 * those offloadings which are only enabled on this queue and
	 * not enabled on all queues.
	 */
	local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;

	/*
	 * New added offloadings for this queue are those not enabled in
	 * rte_eth_dev_configure() and they must be per-queue type.
	 * A pure per-port offloading can't be enabled on a queue while
	 * disabled on another queue. A pure per-port offloading can't
	 * be enabled for any queue as new added one if it hasn't been
	 * enabled in rte_eth_dev_configure().
	 */
	if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
	     local_conf.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
			"within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
			port_id, tx_queue_id, local_conf.offloads,
			dev_info.tx_queue_offload_capa,
			__func__);
		return -EINVAL;
	}

	rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf);
	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
}
2356
2357 int
2358 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2359                                uint16_t nb_tx_desc,
2360                                const struct rte_eth_hairpin_conf *conf)
2361 {
2362         struct rte_eth_dev *dev;
2363         struct rte_eth_hairpin_cap cap;
2364         int i;
2365         int count;
2366         int ret;
2367
2368         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2369         dev = &rte_eth_devices[port_id];
2370
2371         if (tx_queue_id >= dev->data->nb_tx_queues) {
2372                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
2373                 return -EINVAL;
2374         }
2375
2376         if (conf == NULL) {
2377                 RTE_ETHDEV_LOG(ERR,
2378                         "Cannot setup ethdev port %u Tx hairpin queue from NULL config\n",
2379                         port_id);
2380                 return -EINVAL;
2381         }
2382
2383         ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2384         if (ret != 0)
2385                 return ret;
2386         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup,
2387                                 -ENOTSUP);
2388         /* if nb_rx_desc is zero use max number of desc from the driver. */
2389         if (nb_tx_desc == 0)
2390                 nb_tx_desc = cap.max_nb_desc;
2391         if (nb_tx_desc > cap.max_nb_desc) {
2392                 RTE_ETHDEV_LOG(ERR,
2393                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu",
2394                         nb_tx_desc, cap.max_nb_desc);
2395                 return -EINVAL;
2396         }
2397         if (conf->peer_count > cap.max_tx_2_rx) {
2398                 RTE_ETHDEV_LOG(ERR,
2399                         "Invalid value for number of peers for Tx queue(=%u), should be: <= %hu",
2400                         conf->peer_count, cap.max_tx_2_rx);
2401                 return -EINVAL;
2402         }
2403         if (conf->peer_count == 0) {
2404                 RTE_ETHDEV_LOG(ERR,
2405                         "Invalid value for number of peers for Tx queue(=%u), should be: > 0",
2406                         conf->peer_count);
2407                 return -EINVAL;
2408         }
2409         for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
2410              cap.max_nb_queues != UINT16_MAX; i++) {
2411                 if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
2412                         count++;
2413         }
2414         if (count > cap.max_nb_queues) {
2415                 RTE_ETHDEV_LOG(ERR, "To many Tx hairpin queues max is %d",
2416                 cap.max_nb_queues);
2417                 return -EINVAL;
2418         }
2419         if (dev->data->dev_started)
2420                 return -EBUSY;
2421         eth_dev_txq_release(dev, tx_queue_id);
2422         ret = (*dev->dev_ops->tx_hairpin_queue_setup)
2423                 (dev, tx_queue_id, nb_tx_desc, conf);
2424         if (ret == 0)
2425                 dev->data->tx_queue_state[tx_queue_id] =
2426                         RTE_ETH_QUEUE_STATE_HAIRPIN;
2427         return eth_err(port_id, ret);
2428 }
2429
2430 int
2431 rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
2432 {
2433         struct rte_eth_dev *dev;
2434         int ret;
2435
2436         RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2437         dev = &rte_eth_devices[tx_port];
2438
2439         if (dev->data->dev_started == 0) {
2440                 RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port);
2441                 return -EBUSY;
2442         }
2443
2444         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_bind, -ENOTSUP);
2445         ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port);
2446         if (ret != 0)
2447                 RTE_ETHDEV_LOG(ERR, "Failed to bind hairpin Tx %d"
2448                                " to Rx %d (%d - all ports)\n",
2449                                tx_port, rx_port, RTE_MAX_ETHPORTS);
2450
2451         return ret;
2452 }
2453
2454 int
2455 rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
2456 {
2457         struct rte_eth_dev *dev;
2458         int ret;
2459
2460         RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2461         dev = &rte_eth_devices[tx_port];
2462
2463         if (dev->data->dev_started == 0) {
2464                 RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port);
2465                 return -EBUSY;
2466         }
2467
2468         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_unbind, -ENOTSUP);
2469         ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port);
2470         if (ret != 0)
2471                 RTE_ETHDEV_LOG(ERR, "Failed to unbind hairpin Tx %d"
2472                                " from Rx %d (%d - all ports)\n",
2473                                tx_port, rx_port, RTE_MAX_ETHPORTS);
2474
2475         return ret;
2476 }
2477
2478 int
2479 rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
2480                                size_t len, uint32_t direction)
2481 {
2482         struct rte_eth_dev *dev;
2483         int ret;
2484
2485         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2486         dev = &rte_eth_devices[port_id];
2487
2488         if (peer_ports == NULL) {
2489                 RTE_ETHDEV_LOG(ERR,
2490                         "Cannot get ethdev port %u hairpin peer ports to NULL\n",
2491                         port_id);
2492                 return -EINVAL;
2493         }
2494
2495         if (len == 0) {
2496                 RTE_ETHDEV_LOG(ERR,
2497                         "Cannot get ethdev port %u hairpin peer ports to array with zero size\n",
2498                         port_id);
2499                 return -EINVAL;
2500         }
2501
2502         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_get_peer_ports,
2503                                 -ENOTSUP);
2504
2505         ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports,
2506                                                       len, direction);
2507         if (ret < 0)
2508                 RTE_ETHDEV_LOG(ERR, "Failed to get %d hairpin peer %s ports\n",
2509                                port_id, direction ? "Rx" : "Tx");
2510
2511         return ret;
2512 }
2513
2514 void
2515 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
2516                 void *userdata __rte_unused)
2517 {
2518         rte_pktmbuf_free_bulk(pkts, unsent);
2519 }
2520
/* Flush-error callback that drops unsent packets and tallies them. */
void
rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
		void *userdata)
{
	/* userdata points at a user-owned uint64_t drop counter. */
	uint64_t *drop_counter = userdata;

	*drop_counter += unsent;
	rte_pktmbuf_free_bulk(pkts, unsent);
}
2530
2531 int
2532 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
2533                 buffer_tx_error_fn cbfn, void *userdata)
2534 {
2535         if (buffer == NULL) {
2536                 RTE_ETHDEV_LOG(ERR,
2537                         "Cannot set Tx buffer error callback to NULL buffer\n");
2538                 return -EINVAL;
2539         }
2540
2541         buffer->error_callback = cbfn;
2542         buffer->error_userdata = userdata;
2543         return 0;
2544 }
2545
2546 int
2547 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
2548 {
2549         int ret = 0;
2550
2551         if (buffer == NULL) {
2552                 RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL buffer\n");
2553                 return -EINVAL;
2554         }
2555
2556         buffer->size = size;
2557         if (buffer->error_callback == NULL) {
2558                 ret = rte_eth_tx_buffer_set_err_callback(
2559                         buffer, rte_eth_tx_buffer_drop_callback, NULL);
2560         }
2561
2562         return ret;
2563 }
2564
2565 int
2566 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
2567 {
2568         struct rte_eth_dev *dev;
2569         int ret;
2570
2571         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2572         dev = &rte_eth_devices[port_id];
2573
2574         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
2575
2576         /* Call driver to free pending mbufs. */
2577         ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
2578                                                free_cnt);
2579         return eth_err(port_id, ret);
2580 }
2581
2582 int
2583 rte_eth_promiscuous_enable(uint16_t port_id)
2584 {
2585         struct rte_eth_dev *dev;
2586         int diag = 0;
2587
2588         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2589         dev = &rte_eth_devices[port_id];
2590
2591         if (dev->data->promiscuous == 1)
2592                 return 0;
2593
2594         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP);
2595
2596         diag = (*dev->dev_ops->promiscuous_enable)(dev);
2597         dev->data->promiscuous = (diag == 0) ? 1 : 0;
2598
2599         return eth_err(port_id, diag);
2600 }
2601
2602 int
2603 rte_eth_promiscuous_disable(uint16_t port_id)
2604 {
2605         struct rte_eth_dev *dev;
2606         int diag = 0;
2607
2608         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2609         dev = &rte_eth_devices[port_id];
2610
2611         if (dev->data->promiscuous == 0)
2612                 return 0;
2613
2614         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP);
2615
2616         dev->data->promiscuous = 0;
2617         diag = (*dev->dev_ops->promiscuous_disable)(dev);
2618         if (diag != 0)
2619                 dev->data->promiscuous = 1;
2620
2621         return eth_err(port_id, diag);
2622 }
2623
2624 int
2625 rte_eth_promiscuous_get(uint16_t port_id)
2626 {
2627         struct rte_eth_dev *dev;
2628
2629         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2630         dev = &rte_eth_devices[port_id];
2631
2632         return dev->data->promiscuous;
2633 }
2634
2635 int
2636 rte_eth_allmulticast_enable(uint16_t port_id)
2637 {
2638         struct rte_eth_dev *dev;
2639         int diag;
2640
2641         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2642         dev = &rte_eth_devices[port_id];
2643
2644         if (dev->data->all_multicast == 1)
2645                 return 0;
2646
2647         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP);
2648         diag = (*dev->dev_ops->allmulticast_enable)(dev);
2649         dev->data->all_multicast = (diag == 0) ? 1 : 0;
2650
2651         return eth_err(port_id, diag);
2652 }
2653
2654 int
2655 rte_eth_allmulticast_disable(uint16_t port_id)
2656 {
2657         struct rte_eth_dev *dev;
2658         int diag;
2659
2660         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2661         dev = &rte_eth_devices[port_id];
2662
2663         if (dev->data->all_multicast == 0)
2664                 return 0;
2665
2666         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP);
2667         dev->data->all_multicast = 0;
2668         diag = (*dev->dev_ops->allmulticast_disable)(dev);
2669         if (diag != 0)
2670                 dev->data->all_multicast = 1;
2671
2672         return eth_err(port_id, diag);
2673 }
2674
2675 int
2676 rte_eth_allmulticast_get(uint16_t port_id)
2677 {
2678         struct rte_eth_dev *dev;
2679
2680         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2681         dev = &rte_eth_devices[port_id];
2682
2683         return dev->data->all_multicast;
2684 }
2685
2686 int
2687 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
2688 {
2689         struct rte_eth_dev *dev;
2690
2691         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2692         dev = &rte_eth_devices[port_id];
2693
2694         if (eth_link == NULL) {
2695                 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n",
2696                         port_id);
2697                 return -EINVAL;
2698         }
2699
2700         if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started)
2701                 rte_eth_linkstatus_get(dev, eth_link);
2702         else {
2703                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2704                 (*dev->dev_ops->link_update)(dev, 1);
2705                 *eth_link = dev->data->dev_link;
2706         }
2707
2708         return 0;
2709 }
2710
2711 int
2712 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
2713 {
2714         struct rte_eth_dev *dev;
2715
2716         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2717         dev = &rte_eth_devices[port_id];
2718
2719         if (eth_link == NULL) {
2720                 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n",
2721                         port_id);
2722                 return -EINVAL;
2723         }
2724
2725         if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started)
2726                 rte_eth_linkstatus_get(dev, eth_link);
2727         else {
2728                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2729                 (*dev->dev_ops->link_update)(dev, 0);
2730                 *eth_link = dev->data->dev_link;
2731         }
2732
2733         return 0;
2734 }
2735
2736 const char *
2737 rte_eth_link_speed_to_str(uint32_t link_speed)
2738 {
2739         switch (link_speed) {
2740         case ETH_SPEED_NUM_NONE: return "None";
2741         case ETH_SPEED_NUM_10M:  return "10 Mbps";
2742         case ETH_SPEED_NUM_100M: return "100 Mbps";
2743         case ETH_SPEED_NUM_1G:   return "1 Gbps";
2744         case ETH_SPEED_NUM_2_5G: return "2.5 Gbps";
2745         case ETH_SPEED_NUM_5G:   return "5 Gbps";
2746         case ETH_SPEED_NUM_10G:  return "10 Gbps";
2747         case ETH_SPEED_NUM_20G:  return "20 Gbps";
2748         case ETH_SPEED_NUM_25G:  return "25 Gbps";
2749         case ETH_SPEED_NUM_40G:  return "40 Gbps";
2750         case ETH_SPEED_NUM_50G:  return "50 Gbps";
2751         case ETH_SPEED_NUM_56G:  return "56 Gbps";
2752         case ETH_SPEED_NUM_100G: return "100 Gbps";
2753         case ETH_SPEED_NUM_200G: return "200 Gbps";
2754         case ETH_SPEED_NUM_UNKNOWN: return "Unknown";
2755         default: return "Invalid";
2756         }
2757 }
2758
2759 int
2760 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
2761 {
2762         if (str == NULL) {
2763                 RTE_ETHDEV_LOG(ERR, "Cannot convert link to NULL string\n");
2764                 return -EINVAL;
2765         }
2766
2767         if (len == 0) {
2768                 RTE_ETHDEV_LOG(ERR,
2769                         "Cannot convert link to string with zero size\n");
2770                 return -EINVAL;
2771         }
2772
2773         if (eth_link == NULL) {
2774                 RTE_ETHDEV_LOG(ERR, "Cannot convert to string from NULL link\n");
2775                 return -EINVAL;
2776         }
2777
2778         if (eth_link->link_status == ETH_LINK_DOWN)
2779                 return snprintf(str, len, "Link down");
2780         else
2781                 return snprintf(str, len, "Link up at %s %s %s",
2782                         rte_eth_link_speed_to_str(eth_link->link_speed),
2783                         (eth_link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
2784                         "FDX" : "HDX",
2785                         (eth_link->link_autoneg == ETH_LINK_AUTONEG) ?
2786                         "Autoneg" : "Fixed");
2787 }
2788
2789 int
2790 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
2791 {
2792         struct rte_eth_dev *dev;
2793
2794         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2795         dev = &rte_eth_devices[port_id];
2796
2797         if (stats == NULL) {
2798                 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u stats to NULL\n",
2799                         port_id);
2800                 return -EINVAL;
2801         }
2802
2803         memset(stats, 0, sizeof(*stats));
2804
2805         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
2806         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
2807         return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
2808 }
2809
2810 int
2811 rte_eth_stats_reset(uint16_t port_id)
2812 {
2813         struct rte_eth_dev *dev;
2814         int ret;
2815
2816         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2817         dev = &rte_eth_devices[port_id];
2818
2819         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
2820         ret = (*dev->dev_ops->stats_reset)(dev);
2821         if (ret != 0)
2822                 return eth_err(port_id, ret);
2823
2824         dev->data->rx_mbuf_alloc_failed = 0;
2825
2826         return 0;
2827 }
2828
2829 static inline int
2830 eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev)
2831 {
2832         uint16_t nb_rxqs, nb_txqs;
2833         int count;
2834
2835         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2836         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2837
2838         count = RTE_NB_STATS;
2839         if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) {
2840                 count += nb_rxqs * RTE_NB_RXQ_STATS;
2841                 count += nb_txqs * RTE_NB_TXQ_STATS;
2842         }
2843
2844         return count;
2845 }
2846
2847 static int
2848 eth_dev_get_xstats_count(uint16_t port_id)
2849 {
2850         struct rte_eth_dev *dev;
2851         int count;
2852
2853         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2854         dev = &rte_eth_devices[port_id];
2855         if (dev->dev_ops->xstats_get_names != NULL) {
2856                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
2857                 if (count < 0)
2858                         return eth_err(port_id, count);
2859         } else
2860                 count = 0;
2861
2862
2863         count += eth_dev_get_xstats_basic_count(dev);
2864
2865         return count;
2866 }
2867
2868 int
2869 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
2870                 uint64_t *id)
2871 {
2872         int cnt_xstats, idx_xstat;
2873
2874         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2875
2876         if (xstat_name == NULL) {
2877                 RTE_ETHDEV_LOG(ERR,
2878                         "Cannot get ethdev port %u xstats ID from NULL xstat name\n",
2879                         port_id);
2880                 return -ENOMEM;
2881         }
2882
2883         if (id == NULL) {
2884                 RTE_ETHDEV_LOG(ERR,
2885                         "Cannot get ethdev port %u xstats ID to NULL\n",
2886                         port_id);
2887                 return -ENOMEM;
2888         }
2889
2890         /* Get count */
2891         cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
2892         if (cnt_xstats  < 0) {
2893                 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n");
2894                 return -ENODEV;
2895         }
2896
2897         /* Get id-name lookup table */
2898         struct rte_eth_xstat_name xstats_names[cnt_xstats];
2899
2900         if (cnt_xstats != rte_eth_xstats_get_names_by_id(
2901                         port_id, xstats_names, cnt_xstats, NULL)) {
2902                 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n");
2903                 return -1;
2904         }
2905
2906         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
2907                 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
2908                         *id = idx_xstat;
2909                         return 0;
2910                 };
2911         }
2912
2913         return -EINVAL;
2914 }
2915
2916 /* retrieve basic stats names */
2917 static int
2918 eth_basic_stats_get_names(struct rte_eth_dev *dev,
2919         struct rte_eth_xstat_name *xstats_names)
2920 {
2921         int cnt_used_entries = 0;
2922         uint32_t idx, id_queue;
2923         uint16_t num_q;
2924
2925         for (idx = 0; idx < RTE_NB_STATS; idx++) {
2926                 strlcpy(xstats_names[cnt_used_entries].name,
2927                         eth_dev_stats_strings[idx].name,
2928                         sizeof(xstats_names[0].name));
2929                 cnt_used_entries++;
2930         }
2931
2932         if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
2933                 return cnt_used_entries;
2934
2935         num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2936         for (id_queue = 0; id_queue < num_q; id_queue++) {
2937                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
2938                         snprintf(xstats_names[cnt_used_entries].name,
2939                                 sizeof(xstats_names[0].name),
2940                                 "rx_q%u_%s",
2941                                 id_queue, eth_dev_rxq_stats_strings[idx].name);
2942                         cnt_used_entries++;
2943                 }
2944
2945         }
2946         num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2947         for (id_queue = 0; id_queue < num_q; id_queue++) {
2948                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
2949                         snprintf(xstats_names[cnt_used_entries].name,
2950                                 sizeof(xstats_names[0].name),
2951                                 "tx_q%u_%s",
2952                                 id_queue, eth_dev_txq_stats_strings[idx].name);
2953                         cnt_used_entries++;
2954                 }
2955         }
2956         return cnt_used_entries;
2957 }
2958
2959 /* retrieve ethdev extended statistics names */
int
rte_eth_xstats_get_names_by_id(uint16_t port_id,
	struct rte_eth_xstat_name *xstats_names, unsigned int size,
	uint64_t *ids)
{
	struct rte_eth_xstat_name *xstats_names_copy;
	unsigned int no_basic_stat_requested = 1;
	unsigned int no_ext_stat_requested = 1;
	unsigned int expected_entries;
	unsigned int basic_count;
	struct rte_eth_dev *dev;
	unsigned int i;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	basic_count = eth_dev_get_xstats_basic_count(dev);
	ret = eth_dev_get_xstats_count(port_id);
	if (ret < 0)
		return ret;
	expected_entries = (unsigned int)ret;

	/* Return max number of stats if no ids given */
	if (!ids) {
		if (!xstats_names)
			return expected_entries;
		else if (xstats_names && size < expected_entries)
			return expected_entries;
	}

	/* With ids the caller must supply an output array. */
	if (ids && !xstats_names)
		return -EINVAL;

	/*
	 * Fast path: if every requested id is a driver (extended) stat,
	 * translate ids to driver-local numbering and let the driver
	 * resolve the names directly.
	 */
	if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
		uint64_t ids_copy[size];

		for (i = 0; i < size; i++) {
			if (ids[i] < basic_count) {
				no_basic_stat_requested = 0;
				break;
			}

			/*
			 * Convert ids to xstats ids that PMD knows.
			 * ids known by user are basic + extended stats.
			 */
			ids_copy[i] = ids[i] - basic_count;
		}

		if (no_basic_stat_requested)
			return (*dev->dev_ops->xstats_get_names_by_id)(dev,
					ids_copy, xstats_names, size);
	}

	/* Retrieve all stats */
	if (!ids) {
		int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
				expected_entries);
		if (num_stats < 0 || num_stats > (int)expected_entries)
			return num_stats;
		else
			return expected_entries;
	}

	/* Mixed basic/extended request: build the full name table, then
	 * copy out only the requested entries.
	 */
	xstats_names_copy = calloc(expected_entries,
		sizeof(struct rte_eth_xstat_name));

	if (!xstats_names_copy) {
		RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n");
		return -ENOMEM;
	}

	/* Check whether only basic stats were requested. */
	if (ids) {
		for (i = 0; i < size; i++) {
			if (ids[i] >= basic_count) {
				no_ext_stat_requested = 0;
				break;
			}
		}
	}

	/* Fill xstats_names_copy structure */
	if (ids && no_ext_stat_requested) {
		/* Only basic names needed: skip the driver round-trip. */
		eth_basic_stats_get_names(dev, xstats_names_copy);
	} else {
		ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
			expected_entries);
		if (ret < 0) {
			free(xstats_names_copy);
			return ret;
		}
	}

	/* Filter stats */
	for (i = 0; i < size; i++) {
		if (ids[i] >= expected_entries) {
			RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
			free(xstats_names_copy);
			return -1;
		}
		xstats_names[i] = xstats_names_copy[ids[i]];
	}

	free(xstats_names_copy);
	return size;
}
3067
3068 int
3069 rte_eth_xstats_get_names(uint16_t port_id,
3070         struct rte_eth_xstat_name *xstats_names,
3071         unsigned int size)
3072 {
3073         struct rte_eth_dev *dev;
3074         int cnt_used_entries;
3075         int cnt_expected_entries;
3076         int cnt_driver_entries;
3077
3078         cnt_expected_entries = eth_dev_get_xstats_count(port_id);
3079         if (xstats_names == NULL || cnt_expected_entries < 0 ||
3080                         (int)size < cnt_expected_entries)
3081                 return cnt_expected_entries;
3082
3083         /* port_id checked in eth_dev_get_xstats_count() */
3084         dev = &rte_eth_devices[port_id];
3085
3086         cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names);
3087
3088         if (dev->dev_ops->xstats_get_names != NULL) {
3089                 /* If there are any driver-specific xstats, append them
3090                  * to end of list.
3091                  */
3092                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
3093                         dev,
3094                         xstats_names + cnt_used_entries,
3095                         size - cnt_used_entries);
3096                 if (cnt_driver_entries < 0)
3097                         return eth_err(port_id, cnt_driver_entries);
3098                 cnt_used_entries += cnt_driver_entries;
3099         }
3100
3101         return cnt_used_entries;
3102 }
3103
3104
/* Fill xstats[] values from the basic rte_eth_stats counters, using the
 * per-counter byte offsets in the eth_dev_*_stats_strings tables.
 * Returns the number of entries written, or a negative errno from
 * rte_eth_stats_get().
 */
static int
eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
{
	struct rte_eth_dev *dev;
	struct rte_eth_stats eth_stats;
	unsigned int count = 0, i, q;
	uint64_t val, *stats_ptr;
	uint16_t nb_rxqs, nb_txqs;
	int ret;

	ret = rte_eth_stats_get(port_id, &eth_stats);
	if (ret < 0)
		return ret;

	dev = &rte_eth_devices[port_id];

	/* Queue stats are capped at RTE_ETHDEV_QUEUE_STAT_CNTRS. */
	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);

	/* global stats */
	for (i = 0; i < RTE_NB_STATS; i++) {
		/* Read the counter at its recorded offset in eth_stats. */
		stats_ptr = RTE_PTR_ADD(&eth_stats,
					eth_dev_stats_strings[i].offset);
		val = *stats_ptr;
		xstats[count++].value = val;
	}

	/* Per-queue stats only exist when the device auto-fills them. */
	if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
		return count;

	/* per-rxq stats */
	for (q = 0; q < nb_rxqs; q++) {
		for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
			/* Per-queue counters are uint64_t arrays indexed by q. */
			stats_ptr = RTE_PTR_ADD(&eth_stats,
					eth_dev_rxq_stats_strings[i].offset +
					q * sizeof(uint64_t));
			val = *stats_ptr;
			xstats[count++].value = val;
		}
	}

	/* per-txq stats */
	for (q = 0; q < nb_txqs; q++) {
		for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
			stats_ptr = RTE_PTR_ADD(&eth_stats,
					eth_dev_txq_stats_strings[i].offset +
					q * sizeof(uint64_t));
			val = *stats_ptr;
			xstats[count++].value = val;
		}
	}
	return count;
}
3158
3159 /* retrieve ethdev extended statistics */
3160 int
3161 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
3162                          uint64_t *values, unsigned int size)
3163 {
3164         unsigned int no_basic_stat_requested = 1;
3165         unsigned int no_ext_stat_requested = 1;
3166         unsigned int num_xstats_filled;
3167         unsigned int basic_count;
3168         uint16_t expected_entries;
3169         struct rte_eth_dev *dev;
3170         unsigned int i;
3171         int ret;
3172
3173         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3174         dev = &rte_eth_devices[port_id];
3175
3176         ret = eth_dev_get_xstats_count(port_id);
3177         if (ret < 0)
3178                 return ret;
3179         expected_entries = (uint16_t)ret;
3180         struct rte_eth_xstat xstats[expected_entries];
3181         basic_count = eth_dev_get_xstats_basic_count(dev);
3182
3183         /* Return max number of stats if no ids given */
3184         if (!ids) {
3185                 if (!values)
3186                         return expected_entries;
3187                 else if (values && size < expected_entries)
3188                         return expected_entries;
3189         }
3190
3191         if (ids && !values)
3192                 return -EINVAL;
3193
3194         if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
3195                 unsigned int basic_count = eth_dev_get_xstats_basic_count(dev);
3196                 uint64_t ids_copy[size];
3197
3198                 for (i = 0; i < size; i++) {
3199                         if (ids[i] < basic_count) {
3200                                 no_basic_stat_requested = 0;
3201                                 break;
3202                         }
3203
3204                         /*
3205                          * Convert ids to xstats ids that PMD knows.
3206                          * ids known by user are basic + extended stats.
3207                          */
3208                         ids_copy[i] = ids[i] - basic_count;
3209                 }
3210
3211                 if (no_basic_stat_requested)
3212                         return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
3213                                         values, size);
3214         }
3215
3216         if (ids) {
3217                 for (i = 0; i < size; i++) {
3218                         if (ids[i] >= basic_count) {
3219                                 no_ext_stat_requested = 0;
3220                                 break;
3221                         }
3222                 }
3223         }
3224
3225         /* Fill the xstats structure */
3226         if (ids && no_ext_stat_requested)
3227                 ret = eth_basic_stats_get(port_id, xstats);
3228         else
3229                 ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
3230
3231         if (ret < 0)
3232                 return ret;
3233         num_xstats_filled = (unsigned int)ret;
3234
3235         /* Return all stats */
3236         if (!ids) {
3237                 for (i = 0; i < num_xstats_filled; i++)
3238                         values[i] = xstats[i].value;
3239                 return expected_entries;
3240         }
3241
3242         /* Filter stats */
3243         for (i = 0; i < size; i++) {
3244                 if (ids[i] >= expected_entries) {
3245                         RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
3246                         return -1;
3247                 }
3248                 values[i] = xstats[ids[i]].value;
3249         }
3250         return size;
3251 }
3252
/* Retrieve all xstats (basic values first, driver values appended) and
 * assign sequential ids. Returns the total entry count; when n is too
 * small or xstats is NULL, returns the required size instead.
 */
int
rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
	unsigned int n)
{
	struct rte_eth_dev *dev;
	unsigned int count = 0, i;
	signed int xcount = 0;
	uint16_t nb_rxqs, nb_txqs;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	/* Queue stats are capped at RTE_ETHDEV_QUEUE_STAT_CNTRS. */
	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);

	/* Return generic statistics */
	count = RTE_NB_STATS;
	if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS)
		count += (nb_rxqs * RTE_NB_RXQ_STATS) + (nb_txqs * RTE_NB_TXQ_STATS);

	/* implemented by the driver */
	if (dev->dev_ops->xstats_get != NULL) {
		/* Retrieve the xstats from the driver at the end of the
		 * xstats struct.
		 */
		xcount = (*dev->dev_ops->xstats_get)(dev,
				     xstats ? xstats + count : NULL,
				     (n > count) ? n - count : 0);

		if (xcount < 0)
			return eth_err(port_id, xcount);
	}

	/* Not enough room (or no buffer): report the required size. */
	if (n < count + xcount || xstats == NULL)
		return count + xcount;

	/* now fill the xstats structure */
	ret = eth_basic_stats_get(port_id, xstats);
	if (ret < 0)
		return ret;
	count = ret;

	/* Basic entries get ids 0..count-1. */
	for (i = 0; i < count; i++)
		xstats[i].id = i;
	/* add an offset to driver-specific stats */
	for ( ; i < count + xcount; i++)
		xstats[i].id += count;

	return count + xcount;
}
3304
3305 /* reset ethdev extended statistics */
3306 int
3307 rte_eth_xstats_reset(uint16_t port_id)
3308 {
3309         struct rte_eth_dev *dev;
3310
3311         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3312         dev = &rte_eth_devices[port_id];
3313
3314         /* implemented by the driver */
3315         if (dev->dev_ops->xstats_reset != NULL)
3316                 return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev));
3317
3318         /* fallback to default */
3319         return rte_eth_stats_reset(port_id);
3320 }
3321
3322 static int
3323 eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id,
3324                 uint8_t stat_idx, uint8_t is_rx)
3325 {
3326         struct rte_eth_dev *dev;
3327
3328         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3329         dev = &rte_eth_devices[port_id];
3330
3331         if (is_rx && (queue_id >= dev->data->nb_rx_queues))
3332                 return -EINVAL;
3333
3334         if (!is_rx && (queue_id >= dev->data->nb_tx_queues))
3335                 return -EINVAL;
3336
3337         if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
3338                 return -EINVAL;
3339
3340         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
3341         return (*dev->dev_ops->queue_stats_mapping_set) (dev, queue_id, stat_idx, is_rx);
3342 }
3343
3344 int
3345 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
3346                 uint8_t stat_idx)
3347 {
3348         return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id,
3349                                                 tx_queue_id,
3350                                                 stat_idx, STAT_QMAP_TX));
3351 }
3352
3353 int
3354 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
3355                 uint8_t stat_idx)
3356 {
3357         return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id,
3358                                                 rx_queue_id,
3359                                                 stat_idx, STAT_QMAP_RX));
3360 }
3361
3362 int
3363 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
3364 {
3365         struct rte_eth_dev *dev;
3366
3367         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3368         dev = &rte_eth_devices[port_id];
3369
3370         if (fw_version == NULL && fw_size > 0) {
3371                 RTE_ETHDEV_LOG(ERR,
3372                         "Cannot get ethdev port %u FW version to NULL when string size is non zero\n",
3373                         port_id);
3374                 return -EINVAL;
3375         }
3376
3377         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
3378         return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
3379                                                         fw_version, fw_size));
3380 }
3381
int
rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
{
	struct rte_eth_dev *dev;
	/* Default descriptor limits: effectively "no restriction" until the
	 * driver overwrites them in its dev_infos_get() callback.
	 */
	const struct rte_eth_desc_lim lim = {
		.nb_max = UINT16_MAX,
		.nb_min = 0,
		.nb_align = 1,
		.nb_seg_max = UINT16_MAX,
		.nb_mtu_seg_max = UINT16_MAX,
	};
	int diag;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev_info == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u info to NULL\n",
			port_id);
		return -EINVAL;
	}

	/*
	 * Zero the whole structure first so that any field the driver does
	 * not fill keeps a well-defined value, then apply the generic
	 * defaults below before asking the driver for its specifics.
	 */
	memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
	dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;

	dev_info->rx_desc_lim = lim;
	dev_info->tx_desc_lim = lim;
	dev_info->device = dev->device;
	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
	dev_info->max_mtu = UINT16_MAX;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info);
	if (diag != 0) {
		/* Cleanup already filled in device information */
		memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
		return eth_err(port_id, diag);
	}

	/* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */
	dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues,
			RTE_MAX_QUEUES_PER_PORT);
	dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues,
			RTE_MAX_QUEUES_PER_PORT);

	dev_info->driver_name = dev->device->driver->name;
	dev_info->nb_rx_queues = dev->data->nb_rx_queues;
	dev_info->nb_tx_queues = dev->data->nb_tx_queues;

	/* Pointer (not a copy) so the caller reads the current flag value */
	dev_info->dev_flags = &dev->data->dev_flags;

	return 0;
}
3439
3440 int
3441 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3442                                  uint32_t *ptypes, int num)
3443 {
3444         int i, j;
3445         struct rte_eth_dev *dev;
3446         const uint32_t *all_ptypes;
3447
3448         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3449         dev = &rte_eth_devices[port_id];
3450
3451         if (ptypes == NULL && num > 0) {
3452                 RTE_ETHDEV_LOG(ERR,
3453                         "Cannot get ethdev port %u supported packet types to NULL when array size is non zero\n",
3454                         port_id);
3455                 return -EINVAL;
3456         }
3457
3458         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
3459         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3460
3461         if (!all_ptypes)
3462                 return 0;
3463
3464         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
3465                 if (all_ptypes[i] & ptype_mask) {
3466                         if (j < num)
3467                                 ptypes[j] = all_ptypes[i];
3468                         j++;
3469                 }
3470
3471         return j;
3472 }
3473
3474 int
3475 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3476                                  uint32_t *set_ptypes, unsigned int num)
3477 {
3478         const uint32_t valid_ptype_masks[] = {
3479                 RTE_PTYPE_L2_MASK,
3480                 RTE_PTYPE_L3_MASK,
3481                 RTE_PTYPE_L4_MASK,
3482                 RTE_PTYPE_TUNNEL_MASK,
3483                 RTE_PTYPE_INNER_L2_MASK,
3484                 RTE_PTYPE_INNER_L3_MASK,
3485                 RTE_PTYPE_INNER_L4_MASK,
3486         };
3487         const uint32_t *all_ptypes;
3488         struct rte_eth_dev *dev;
3489         uint32_t unused_mask;
3490         unsigned int i, j;
3491         int ret;
3492
3493         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3494         dev = &rte_eth_devices[port_id];
3495
3496         if (num > 0 && set_ptypes == NULL) {
3497                 RTE_ETHDEV_LOG(ERR,
3498                         "Cannot get ethdev port %u set packet types to NULL when array size is non zero\n",
3499                         port_id);
3500                 return -EINVAL;
3501         }
3502
3503         if (*dev->dev_ops->dev_supported_ptypes_get == NULL ||
3504                         *dev->dev_ops->dev_ptypes_set == NULL) {
3505                 ret = 0;
3506                 goto ptype_unknown;
3507         }
3508
3509         if (ptype_mask == 0) {
3510                 ret = (*dev->dev_ops->dev_ptypes_set)(dev,
3511                                 ptype_mask);
3512                 goto ptype_unknown;
3513         }
3514
3515         unused_mask = ptype_mask;
3516         for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) {
3517                 uint32_t mask = ptype_mask & valid_ptype_masks[i];
3518                 if (mask && mask != valid_ptype_masks[i]) {
3519                         ret = -EINVAL;
3520                         goto ptype_unknown;
3521                 }
3522                 unused_mask &= ~valid_ptype_masks[i];
3523         }
3524
3525         if (unused_mask) {
3526                 ret = -EINVAL;
3527                 goto ptype_unknown;
3528         }
3529
3530         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3531         if (all_ptypes == NULL) {
3532                 ret = 0;
3533                 goto ptype_unknown;
3534         }
3535
3536         /*
3537          * Accommodate as many set_ptypes as possible. If the supplied
3538          * set_ptypes array is insufficient fill it partially.
3539          */
3540         for (i = 0, j = 0; set_ptypes != NULL &&
3541                                 (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) {
3542                 if (ptype_mask & all_ptypes[i]) {
3543                         if (j < num - 1) {
3544                                 set_ptypes[j] = all_ptypes[i];
3545                                 j++;
3546                                 continue;
3547                         }
3548                         break;
3549                 }
3550         }
3551
3552         if (set_ptypes != NULL && j < num)
3553                 set_ptypes[j] = RTE_PTYPE_UNKNOWN;
3554
3555         return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask);
3556
3557 ptype_unknown:
3558         if (num > 0)
3559                 set_ptypes[0] = RTE_PTYPE_UNKNOWN;
3560
3561         return ret;
3562 }
3563
3564 int
3565 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
3566 {
3567         struct rte_eth_dev *dev;
3568
3569         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3570         dev = &rte_eth_devices[port_id];
3571
3572         if (mac_addr == NULL) {
3573                 RTE_ETHDEV_LOG(ERR,
3574                         "Cannot get ethdev port %u MAC address to NULL\n",
3575                         port_id);
3576                 return -EINVAL;
3577         }
3578
3579         rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
3580
3581         return 0;
3582 }
3583
3584 int
3585 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
3586 {
3587         struct rte_eth_dev *dev;
3588
3589         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3590         dev = &rte_eth_devices[port_id];
3591
3592         if (mtu == NULL) {
3593                 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u MTU to NULL\n",
3594                         port_id);
3595                 return -EINVAL;
3596         }
3597
3598         *mtu = dev->data->mtu;
3599         return 0;
3600 }
3601
3602 int
3603 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
3604 {
3605         int ret;
3606         struct rte_eth_dev_info dev_info;
3607         struct rte_eth_dev *dev;
3608
3609         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3610         dev = &rte_eth_devices[port_id];
3611         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
3612
3613         /*
3614          * Check if the device supports dev_infos_get, if it does not
3615          * skip min_mtu/max_mtu validation here as this requires values
3616          * that are populated within the call to rte_eth_dev_info_get()
3617          * which relies on dev->dev_ops->dev_infos_get.
3618          */
3619         if (*dev->dev_ops->dev_infos_get != NULL) {
3620                 ret = rte_eth_dev_info_get(port_id, &dev_info);
3621                 if (ret != 0)
3622                         return ret;
3623
3624                 if (mtu < dev_info.min_mtu || mtu > dev_info.max_mtu)
3625                         return -EINVAL;
3626         }
3627
3628         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
3629         if (!ret)
3630                 dev->data->mtu = mtu;
3631
3632         return eth_err(port_id, ret);
3633 }
3634
3635 int
3636 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
3637 {
3638         struct rte_eth_dev *dev;
3639         int ret;
3640
3641         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3642         dev = &rte_eth_devices[port_id];
3643
3644         if (!(dev->data->dev_conf.rxmode.offloads &
3645               DEV_RX_OFFLOAD_VLAN_FILTER)) {
3646                 RTE_ETHDEV_LOG(ERR, "Port %u: vlan-filtering disabled\n",
3647                         port_id);
3648                 return -ENOSYS;
3649         }
3650
3651         if (vlan_id > 4095) {
3652                 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n",
3653                         port_id, vlan_id);
3654                 return -EINVAL;
3655         }
3656         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
3657
3658         ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
3659         if (ret == 0) {
3660                 struct rte_vlan_filter_conf *vfc;
3661                 int vidx;
3662                 int vbit;
3663
3664                 vfc = &dev->data->vlan_filter_conf;
3665                 vidx = vlan_id / 64;
3666                 vbit = vlan_id % 64;
3667
3668                 if (on)
3669                         vfc->ids[vidx] |= UINT64_C(1) << vbit;
3670                 else
3671                         vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
3672         }
3673
3674         return eth_err(port_id, ret);
3675 }
3676
3677 int
3678 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3679                                     int on)
3680 {
3681         struct rte_eth_dev *dev;
3682
3683         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3684         dev = &rte_eth_devices[port_id];
3685
3686         if (rx_queue_id >= dev->data->nb_rx_queues) {
3687                 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id);
3688                 return -EINVAL;
3689         }
3690
3691         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
3692         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
3693
3694         return 0;
3695 }
3696
3697 int
3698 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
3699                                 enum rte_vlan_type vlan_type,
3700                                 uint16_t tpid)
3701 {
3702         struct rte_eth_dev *dev;
3703
3704         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3705         dev = &rte_eth_devices[port_id];
3706
3707         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
3708         return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
3709                                                                tpid));
3710 }
3711
3712 int
3713 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
3714 {
3715         struct rte_eth_dev_info dev_info;
3716         struct rte_eth_dev *dev;
3717         int ret = 0;
3718         int mask = 0;
3719         int cur, org = 0;
3720         uint64_t orig_offloads;
3721         uint64_t dev_offloads;
3722         uint64_t new_offloads;
3723
3724         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3725         dev = &rte_eth_devices[port_id];
3726
3727         /* save original values in case of failure */
3728         orig_offloads = dev->data->dev_conf.rxmode.offloads;
3729         dev_offloads = orig_offloads;
3730
3731         /* check which option changed by application */
3732         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
3733         org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
3734         if (cur != org) {
3735                 if (cur)
3736                         dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
3737                 else
3738                         dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
3739                 mask |= ETH_VLAN_STRIP_MASK;
3740         }
3741
3742         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
3743         org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
3744         if (cur != org) {
3745                 if (cur)
3746                         dev_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3747                 else
3748                         dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
3749                 mask |= ETH_VLAN_FILTER_MASK;
3750         }
3751
3752         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
3753         org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND);
3754         if (cur != org) {
3755                 if (cur)
3756                         dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
3757                 else
3758                         dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
3759                 mask |= ETH_VLAN_EXTEND_MASK;
3760         }
3761
3762         cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD);
3763         org = !!(dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP);
3764         if (cur != org) {
3765                 if (cur)
3766                         dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
3767                 else
3768                         dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
3769                 mask |= ETH_QINQ_STRIP_MASK;
3770         }
3771
3772         /*no change*/
3773         if (mask == 0)
3774                 return ret;
3775
3776         ret = rte_eth_dev_info_get(port_id, &dev_info);
3777         if (ret != 0)
3778                 return ret;
3779
3780         /* Rx VLAN offloading must be within its device capabilities */
3781         if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) {
3782                 new_offloads = dev_offloads & ~orig_offloads;
3783                 RTE_ETHDEV_LOG(ERR,
3784                         "Ethdev port_id=%u requested new added VLAN offloads "
3785                         "0x%" PRIx64 " must be within Rx offloads capabilities "
3786                         "0x%" PRIx64 " in %s()\n",
3787                         port_id, new_offloads, dev_info.rx_offload_capa,
3788                         __func__);
3789                 return -EINVAL;
3790         }
3791
3792         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
3793         dev->data->dev_conf.rxmode.offloads = dev_offloads;
3794         ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
3795         if (ret) {
3796                 /* hit an error restore  original values */
3797                 dev->data->dev_conf.rxmode.offloads = orig_offloads;
3798         }
3799
3800         return eth_err(port_id, ret);
3801 }
3802
3803 int
3804 rte_eth_dev_get_vlan_offload(uint16_t port_id)
3805 {
3806         struct rte_eth_dev *dev;
3807         uint64_t *dev_offloads;
3808         int ret = 0;
3809
3810         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3811         dev = &rte_eth_devices[port_id];
3812         dev_offloads = &dev->data->dev_conf.rxmode.offloads;
3813
3814         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
3815                 ret |= ETH_VLAN_STRIP_OFFLOAD;
3816
3817         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
3818                 ret |= ETH_VLAN_FILTER_OFFLOAD;
3819
3820         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
3821                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
3822
3823         if (*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
3824                 ret |= ETH_QINQ_STRIP_OFFLOAD;
3825
3826         return ret;
3827 }
3828
3829 int
3830 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
3831 {
3832         struct rte_eth_dev *dev;
3833
3834         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3835         dev = &rte_eth_devices[port_id];
3836
3837         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
3838         return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
3839 }
3840
3841 int
3842 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3843 {
3844         struct rte_eth_dev *dev;
3845
3846         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3847         dev = &rte_eth_devices[port_id];
3848
3849         if (fc_conf == NULL) {
3850                 RTE_ETHDEV_LOG(ERR,
3851                         "Cannot get ethdev port %u flow control config to NULL\n",
3852                         port_id);
3853                 return -EINVAL;
3854         }
3855
3856         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
3857         memset(fc_conf, 0, sizeof(*fc_conf));
3858         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
3859 }
3860
3861 int
3862 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3863 {
3864         struct rte_eth_dev *dev;
3865
3866         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3867         dev = &rte_eth_devices[port_id];
3868
3869         if (fc_conf == NULL) {
3870                 RTE_ETHDEV_LOG(ERR,
3871                         "Cannot set ethdev port %u flow control from NULL config\n",
3872                         port_id);
3873                 return -EINVAL;
3874         }
3875
3876         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
3877                 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n");
3878                 return -EINVAL;
3879         }
3880
3881         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
3882         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
3883 }
3884
3885 int
3886 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
3887                                    struct rte_eth_pfc_conf *pfc_conf)
3888 {
3889         struct rte_eth_dev *dev;
3890
3891         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3892         dev = &rte_eth_devices[port_id];
3893
3894         if (pfc_conf == NULL) {
3895                 RTE_ETHDEV_LOG(ERR,
3896                         "Cannot set ethdev port %u priority flow control from NULL config\n",
3897                         port_id);
3898                 return -EINVAL;
3899         }
3900
3901         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
3902                 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
3903                 return -EINVAL;
3904         }
3905
3906         /* High water, low water validation are device specific */
3907         if  (*dev->dev_ops->priority_flow_ctrl_set)
3908                 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
3909                                         (dev, pfc_conf));
3910         return -ENOTSUP;
3911 }
3912
3913 static int
3914 eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
3915                         uint16_t reta_size)
3916 {
3917         uint16_t i, num;
3918
3919         num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
3920         for (i = 0; i < num; i++) {
3921                 if (reta_conf[i].mask)
3922                         return 0;
3923         }
3924
3925         return -EINVAL;
3926 }
3927
3928 static int
3929 eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
3930                          uint16_t reta_size,
3931                          uint16_t max_rxq)
3932 {
3933         uint16_t i, idx, shift;
3934
3935         if (max_rxq == 0) {
3936                 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n");
3937                 return -EINVAL;
3938         }
3939
3940         for (i = 0; i < reta_size; i++) {
3941                 idx = i / RTE_RETA_GROUP_SIZE;
3942                 shift = i % RTE_RETA_GROUP_SIZE;
3943                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
3944                         (reta_conf[idx].reta[shift] >= max_rxq)) {
3945                         RTE_ETHDEV_LOG(ERR,
3946                                 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n",
3947                                 idx, shift,
3948                                 reta_conf[idx].reta[shift], max_rxq);
3949                         return -EINVAL;
3950                 }
3951         }
3952
3953         return 0;
3954 }
3955
3956 int
3957 rte_eth_dev_rss_reta_update(uint16_t port_id,
3958                             struct rte_eth_rss_reta_entry64 *reta_conf,
3959                             uint16_t reta_size)
3960 {
3961         struct rte_eth_dev *dev;
3962         int ret;
3963
3964         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3965         dev = &rte_eth_devices[port_id];
3966
3967         if (reta_conf == NULL) {
3968                 RTE_ETHDEV_LOG(ERR,
3969                         "Cannot update ethdev port %u RSS RETA to NULL\n",
3970                         port_id);
3971                 return -EINVAL;
3972         }
3973
3974         if (reta_size == 0) {
3975                 RTE_ETHDEV_LOG(ERR,
3976                         "Cannot update ethdev port %u RSS RETA with zero size\n",
3977                         port_id);
3978                 return -EINVAL;
3979         }
3980
3981         /* Check mask bits */
3982         ret = eth_check_reta_mask(reta_conf, reta_size);
3983         if (ret < 0)
3984                 return ret;
3985
3986         /* Check entry value */
3987         ret = eth_check_reta_entry(reta_conf, reta_size,
3988                                 dev->data->nb_rx_queues);
3989         if (ret < 0)
3990                 return ret;
3991
3992         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
3993         return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
3994                                                              reta_size));
3995 }
3996
3997 int
3998 rte_eth_dev_rss_reta_query(uint16_t port_id,
3999                            struct rte_eth_rss_reta_entry64 *reta_conf,
4000                            uint16_t reta_size)
4001 {
4002         struct rte_eth_dev *dev;
4003         int ret;
4004
4005         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4006         dev = &rte_eth_devices[port_id];
4007
4008         if (reta_conf == NULL) {
4009                 RTE_ETHDEV_LOG(ERR,
4010                         "Cannot query ethdev port %u RSS RETA from NULL config\n",
4011                         port_id);
4012                 return -EINVAL;
4013         }
4014
4015         /* Check mask bits */
4016         ret = eth_check_reta_mask(reta_conf, reta_size);
4017         if (ret < 0)
4018                 return ret;
4019
4020         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
4021         return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
4022                                                             reta_size));
4023 }
4024
4025 int
4026 rte_eth_dev_rss_hash_update(uint16_t port_id,
4027                             struct rte_eth_rss_conf *rss_conf)
4028 {
4029         struct rte_eth_dev *dev;
4030         struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
4031         int ret;
4032
4033         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4034         dev = &rte_eth_devices[port_id];
4035
4036         if (rss_conf == NULL) {
4037                 RTE_ETHDEV_LOG(ERR,
4038                         "Cannot update ethdev port %u RSS hash from NULL config\n",
4039                         port_id);
4040                 return -EINVAL;
4041         }
4042
4043         ret = rte_eth_dev_info_get(port_id, &dev_info);
4044         if (ret != 0)
4045                 return ret;
4046
4047         rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf);
4048         if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
4049             dev_info.flow_type_rss_offloads) {
4050                 RTE_ETHDEV_LOG(ERR,
4051                         "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
4052                         port_id, rss_conf->rss_hf,
4053                         dev_info.flow_type_rss_offloads);
4054                 return -EINVAL;
4055         }
4056         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
4057         return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
4058                                                                  rss_conf));
4059 }
4060
4061 int
4062 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
4063                               struct rte_eth_rss_conf *rss_conf)
4064 {
4065         struct rte_eth_dev *dev;
4066
4067         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4068         dev = &rte_eth_devices[port_id];
4069
4070         if (rss_conf == NULL) {
4071                 RTE_ETHDEV_LOG(ERR,
4072                         "Cannot get ethdev port %u RSS hash config to NULL\n",
4073                         port_id);
4074                 return -EINVAL;
4075         }
4076
4077         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
4078         return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
4079                                                                    rss_conf));
4080 }
4081
4082 int
4083 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
4084                                 struct rte_eth_udp_tunnel *udp_tunnel)
4085 {
4086         struct rte_eth_dev *dev;
4087
4088         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4089         dev = &rte_eth_devices[port_id];
4090
4091         if (udp_tunnel == NULL) {
4092                 RTE_ETHDEV_LOG(ERR,
4093                         "Cannot add ethdev port %u UDP tunnel port from NULL UDP tunnel\n",
4094                         port_id);
4095                 return -EINVAL;
4096         }
4097
4098         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
4099                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4100                 return -EINVAL;
4101         }
4102
4103         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
4104         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
4105                                                                 udp_tunnel));
4106 }
4107
4108 int
4109 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
4110                                    struct rte_eth_udp_tunnel *udp_tunnel)
4111 {
4112         struct rte_eth_dev *dev;
4113
4114         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4115         dev = &rte_eth_devices[port_id];
4116
4117         if (udp_tunnel == NULL) {
4118                 RTE_ETHDEV_LOG(ERR,
4119                         "Cannot delete ethdev port %u UDP tunnel port from NULL UDP tunnel\n",
4120                         port_id);
4121                 return -EINVAL;
4122         }
4123
4124         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
4125                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4126                 return -EINVAL;
4127         }
4128
4129         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
4130         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
4131                                                                 udp_tunnel));
4132 }
4133
4134 int
4135 rte_eth_led_on(uint16_t port_id)
4136 {
4137         struct rte_eth_dev *dev;
4138
4139         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4140         dev = &rte_eth_devices[port_id];
4141
4142         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
4143         return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
4144 }
4145
4146 int
4147 rte_eth_led_off(uint16_t port_id)
4148 {
4149         struct rte_eth_dev *dev;
4150
4151         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4152         dev = &rte_eth_devices[port_id];
4153
4154         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
4155         return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
4156 }
4157
4158 int
4159 rte_eth_fec_get_capability(uint16_t port_id,
4160                            struct rte_eth_fec_capa *speed_fec_capa,
4161                            unsigned int num)
4162 {
4163         struct rte_eth_dev *dev;
4164         int ret;
4165
4166         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4167         dev = &rte_eth_devices[port_id];
4168
4169         if (speed_fec_capa == NULL && num > 0) {
4170                 RTE_ETHDEV_LOG(ERR,
4171                         "Cannot get ethdev port %u FEC capability to NULL when array size is non zero\n",
4172                         port_id);
4173                 return -EINVAL;
4174         }
4175
4176         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get_capability, -ENOTSUP);
4177         ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num);
4178
4179         return ret;
4180 }
4181
4182 int
4183 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa)
4184 {
4185         struct rte_eth_dev *dev;
4186
4187         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4188         dev = &rte_eth_devices[port_id];
4189
4190         if (fec_capa == NULL) {
4191                 RTE_ETHDEV_LOG(ERR,
4192                         "Cannot get ethdev port %u current FEC mode to NULL\n",
4193                         port_id);
4194                 return -EINVAL;
4195         }
4196
4197         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get, -ENOTSUP);
4198         return eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa));
4199 }
4200
4201 int
4202 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa)
4203 {
4204         struct rte_eth_dev *dev;
4205
4206         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4207         dev = &rte_eth_devices[port_id];
4208
4209         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_set, -ENOTSUP);
4210         return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa));
4211 }
4212
4213 /*
4214  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
4215  * an empty spot.
4216  */
4217 static int
4218 eth_dev_get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
4219 {
4220         struct rte_eth_dev_info dev_info;
4221         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4222         unsigned i;
4223         int ret;
4224
4225         ret = rte_eth_dev_info_get(port_id, &dev_info);
4226         if (ret != 0)
4227                 return -1;
4228
4229         for (i = 0; i < dev_info.max_mac_addrs; i++)
4230                 if (memcmp(addr, &dev->data->mac_addrs[i],
4231                                 RTE_ETHER_ADDR_LEN) == 0)
4232                         return i;
4233
4234         return -1;
4235 }
4236
/* All-zero address: marks a free slot in the MAC/UTA shadow tables. */
static const struct rte_ether_addr null_mac_addr;
4238
/*
 * Add a MAC address to the port's filter table and associate it with the
 * given VMDq pool.  On driver success the address is mirrored into the
 * shadow table (dev->data->mac_addrs) and the pool bit is recorded in
 * mac_pool_sel.  Returns 0, or a negative errno on failure.
 */
int
rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
			uint32_t pool)
{
	struct rte_eth_dev *dev;
	int index;
	uint64_t pool_mask;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (addr == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot add ethdev port %u MAC address from NULL address\n",
			port_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);

	/* 00:00:00:00:00:00 is the free-slot sentinel, so it cannot be added. */
	if (rte_is_zero_ether_addr(addr)) {
		RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
			port_id);
		return -EINVAL;
	}
	/* 'pool' selects a bit in the 64-bit mac_pool_sel mask below. */
	if (pool >= ETH_64_POOLS) {
		RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", ETH_64_POOLS - 1);
		return -EINVAL;
	}

	/* Reuse the slot when the address exists; otherwise claim a free one. */
	index = eth_dev_get_mac_addr_index(port_id, addr);
	if (index < 0) {
		index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr);
		if (index < 0) {
			RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
				port_id);
			return -ENOSPC;
		}
	} else {
		pool_mask = dev->data->mac_pool_sel[index];

		/* Check if both MAC address and pool is already there, and do nothing */
		if (pool_mask & (1ULL << pool))
			return 0;
	}

	/* Update NIC */
	ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);

	if (ret == 0) {
		/* Update address in NIC data structure */
		rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]);

		/* Update pool bitmap in NIC data structure */
		dev->data->mac_pool_sel[index] |= (1ULL << pool);
	}

	return eth_err(port_id, ret);
}
4299
4300 int
4301 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr)
4302 {
4303         struct rte_eth_dev *dev;
4304         int index;
4305
4306         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4307         dev = &rte_eth_devices[port_id];
4308
4309         if (addr == NULL) {
4310                 RTE_ETHDEV_LOG(ERR,
4311                         "Cannot remove ethdev port %u MAC address from NULL address\n",
4312                         port_id);
4313                 return -EINVAL;
4314         }
4315
4316         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
4317
4318         index = eth_dev_get_mac_addr_index(port_id, addr);
4319         if (index == 0) {
4320                 RTE_ETHDEV_LOG(ERR,
4321                         "Port %u: Cannot remove default MAC address\n",
4322                         port_id);
4323                 return -EADDRINUSE;
4324         } else if (index < 0)
4325                 return 0;  /* Do nothing if address wasn't found */
4326
4327         /* Update NIC */
4328         (*dev->dev_ops->mac_addr_remove)(dev, index);
4329
4330         /* Update address in NIC data structure */
4331         rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
4332
4333         /* reset pool bitmap */
4334         dev->data->mac_pool_sel[index] = 0;
4335
4336         return 0;
4337 }
4338
4339 int
4340 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr)
4341 {
4342         struct rte_eth_dev *dev;
4343         int ret;
4344
4345         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4346         dev = &rte_eth_devices[port_id];
4347
4348         if (addr == NULL) {
4349                 RTE_ETHDEV_LOG(ERR,
4350                         "Cannot set ethdev port %u default MAC address from NULL address\n",
4351                         port_id);
4352                 return -EINVAL;
4353         }
4354
4355         if (!rte_is_valid_assigned_ether_addr(addr))
4356                 return -EINVAL;
4357
4358         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
4359
4360         ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
4361         if (ret < 0)
4362                 return ret;
4363
4364         /* Update default address in NIC data structure */
4365         rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
4366
4367         return 0;
4368 }
4369
4370
4371 /*
4372  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
4373  * an empty spot.
4374  */
4375 static int
4376 eth_dev_get_hash_mac_addr_index(uint16_t port_id,
4377                 const struct rte_ether_addr *addr)
4378 {
4379         struct rte_eth_dev_info dev_info;
4380         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4381         unsigned i;
4382         int ret;
4383
4384         ret = rte_eth_dev_info_get(port_id, &dev_info);
4385         if (ret != 0)
4386                 return -1;
4387
4388         if (!dev->data->hash_mac_addrs)
4389                 return -1;
4390
4391         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
4392                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
4393                         RTE_ETHER_ADDR_LEN) == 0)
4394                         return i;
4395
4396         return -1;
4397 }
4398
/*
 * Add (on != 0) or remove (on == 0) a unicast address in the port's
 * unicast hash table (UTA).  The shadow table hash_mac_addrs is kept in
 * sync with the hardware on driver success.
 */
int
rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
				uint8_t on)
{
	int index;
	int ret;
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (addr == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set ethdev port %u unicast hash table from NULL address\n",
			port_id);
		return -EINVAL;
	}

	/* The all-zero address is the free-slot sentinel and cannot be used. */
	if (rte_is_zero_ether_addr(addr)) {
		RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
			port_id);
		return -EINVAL;
	}

	index = eth_dev_get_hash_mac_addr_index(port_id, addr);
	/* Check if it's already there, and do nothing */
	if ((index >= 0) && on)
		return 0;

	if (index < 0) {
		/* Removing an address that was never added is an error. */
		if (!on) {
			RTE_ETHDEV_LOG(ERR,
				"Port %u: the MAC address was not set in UTA\n",
				port_id);
			return -EINVAL;
		}

		/* Adding: claim a free (all-zero) slot in the shadow table. */
		index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr);
		if (index < 0) {
			RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
				port_id);
			return -ENOSPC;
		}
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
	ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
	if (ret == 0) {
		/* Update address in NIC data structure */
		if (on)
			rte_ether_addr_copy(addr,
					&dev->data->hash_mac_addrs[index]);
		else
			rte_ether_addr_copy(&null_mac_addr,
					&dev->data->hash_mac_addrs[index]);
	}

	return eth_err(port_id, ret);
}
4458
4459 int
4460 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
4461 {
4462         struct rte_eth_dev *dev;
4463
4464         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4465         dev = &rte_eth_devices[port_id];
4466
4467         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
4468         return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
4469                                                                        on));
4470 }
4471
4472 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
4473                                         uint16_t tx_rate)
4474 {
4475         struct rte_eth_dev *dev;
4476         struct rte_eth_dev_info dev_info;
4477         struct rte_eth_link link;
4478         int ret;
4479
4480         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4481         dev = &rte_eth_devices[port_id];
4482
4483         ret = rte_eth_dev_info_get(port_id, &dev_info);
4484         if (ret != 0)
4485                 return ret;
4486
4487         link = dev->data->dev_link;
4488
4489         if (queue_idx > dev_info.max_tx_queues) {
4490                 RTE_ETHDEV_LOG(ERR,
4491                         "Set queue rate limit:port %u: invalid queue id=%u\n",
4492                         port_id, queue_idx);
4493                 return -EINVAL;
4494         }
4495
4496         if (tx_rate > link.link_speed) {
4497                 RTE_ETHDEV_LOG(ERR,
4498                         "Set queue rate limit:invalid tx_rate=%u, bigger than link speed= %d\n",
4499                         tx_rate, link.link_speed);
4500                 return -EINVAL;
4501         }
4502
4503         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
4504         return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
4505                                                         queue_idx, tx_rate));
4506 }
4507
/*
 * Install or update a port mirroring rule.  Validates the rule config
 * (type, destination pool, pool mask, VLAN mask) before handing it to the
 * driver.  'on' enables or disables the rule with id 'rule_id'.
 */
int
rte_eth_mirror_rule_set(uint16_t port_id,
			struct rte_eth_mirror_conf *mirror_conf,
			uint8_t rule_id, uint8_t on)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (mirror_conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set ethdev port %u mirror rule from NULL config\n",
			port_id);
		return -EINVAL;
	}

	/* rule_type is a non-empty bitmask of ETH_MIRROR_* flags. */
	if (mirror_conf->rule_type == 0) {
		RTE_ETHDEV_LOG(ERR, "Mirror rule type can not be 0\n");
		return -EINVAL;
	}

	if (mirror_conf->dst_pool >= ETH_64_POOLS) {
		RTE_ETHDEV_LOG(ERR, "Invalid dst pool, pool id must be 0-%d\n",
			ETH_64_POOLS - 1);
		return -EINVAL;
	}

	/* Pool-based mirroring needs at least one source pool selected. */
	if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
	     ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
	    (mirror_conf->pool_mask == 0)) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid mirror pool, pool mask can not be 0\n");
		return -EINVAL;
	}

	/* VLAN-based mirroring needs at least one VLAN selected. */
	if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
	    mirror_conf->vlan.vlan_mask == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid vlan mask, vlan mask can not be 0\n");
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);

	return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
						mirror_conf, rule_id, on));
}
4556
4557 int
4558 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
4559 {
4560         struct rte_eth_dev *dev;
4561
4562         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4563         dev = &rte_eth_devices[port_id];
4564
4565         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
4566         return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev, rule_id));
4567 }
4568
4569 RTE_INIT(eth_dev_init_cb_lists)
4570 {
4571         uint16_t i;
4572
4573         for (i = 0; i < RTE_MAX_ETHPORTS; i++)
4574                 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
4575 }
4576
/*
 * Register cb_fn for 'event' on one port, or on every possible port when
 * port_id == RTE_ETH_ALL.  An identical (fn, arg, event) registration is
 * silently skipped.  On allocation failure, registrations already made by
 * this call are rolled back through the unregister path.
 */
int
rte_eth_dev_callback_register(uint16_t port_id,
			enum rte_eth_event_type event,
			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_callback *user_cb;
	uint16_t next_port;
	uint16_t last_port;

	if (cb_fn == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot register ethdev port %u callback from NULL\n",
			port_id);
		return -EINVAL;
	}

	if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
		return -EINVAL;
	}

	/* Expand RTE_ETH_ALL into the inclusive range [0, RTE_MAX_ETHPORTS-1]. */
	if (port_id == RTE_ETH_ALL) {
		next_port = 0;
		last_port = RTE_MAX_ETHPORTS - 1;
	} else {
		next_port = last_port = port_id;
	}

	rte_spinlock_lock(&eth_dev_cb_lock);

	do {
		dev = &rte_eth_devices[next_port];

		/* Look for an identical registration; NULL means none found. */
		TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
			if (user_cb->cb_fn == cb_fn &&
				user_cb->cb_arg == cb_arg &&
				user_cb->event == event) {
				break;
			}
		}

		/* create a new callback. */
		if (user_cb == NULL) {
			user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				sizeof(struct rte_eth_dev_callback), 0);
			if (user_cb != NULL) {
				user_cb->cb_fn = cb_fn;
				user_cb->cb_arg = cb_arg;
				user_cb->event = event;
				TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
						  user_cb, next);
			} else {
				/* Out of memory: release the lock (unregister
				 * takes it itself) and undo the registrations
				 * made on earlier ports in this loop.
				 */
				rte_spinlock_unlock(&eth_dev_cb_lock);
				rte_eth_dev_callback_unregister(port_id, event,
								cb_fn, cb_arg);
				return -ENOMEM;
			}

		}
	} while (++next_port <= last_port);

	rte_spinlock_unlock(&eth_dev_cb_lock);
	return 0;
}
4642
/*
 * Remove callbacks matching (cb_fn, event) — and cb_arg, unless cb_arg is
 * (void *)-1 which matches any argument — from one port, or from all ports
 * when port_id == RTE_ETH_ALL.  A callback that is currently executing
 * (active != 0) cannot be freed; -EAGAIN is returned so the caller retries.
 */
int
rte_eth_dev_callback_unregister(uint16_t port_id,
			enum rte_eth_event_type event,
			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_callback *cb, *next;
	uint16_t next_port;
	uint16_t last_port;

	if (cb_fn == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot unregister ethdev port %u callback from NULL\n",
			port_id);
		return -EINVAL;
	}

	if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
		return -EINVAL;
	}

	/* Expand RTE_ETH_ALL into the inclusive range [0, RTE_MAX_ETHPORTS-1]. */
	if (port_id == RTE_ETH_ALL) {
		next_port = 0;
		last_port = RTE_MAX_ETHPORTS - 1;
	} else {
		next_port = last_port = port_id;
	}

	rte_spinlock_lock(&eth_dev_cb_lock);

	do {
		dev = &rte_eth_devices[next_port];
		/* NOTE(review): ret is reset per port, so with RTE_ETH_ALL
		 * only the last port's -EAGAIN is reported — confirm intended.
		 */
		ret = 0;
		/* Manual iteration: 'next' is saved before a possible free. */
		for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
		     cb = next) {

			next = TAILQ_NEXT(cb, next);

			if (cb->cb_fn != cb_fn || cb->event != event ||
			    (cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
				continue;

			/*
			 * if this callback is not executing right now,
			 * then remove it.
			 */
			if (cb->active == 0) {
				TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
				rte_free(cb);
			} else {
				ret = -EAGAIN;
			}
		}
	} while (++next_port <= last_port);

	rte_spinlock_unlock(&eth_dev_cb_lock);
	return ret;
}
4703
/*
 * Invoke every callback registered on 'dev' for 'event'.  The list lock is
 * dropped around each user callback — a stack copy of the entry is invoked —
 * and the entry's 'active' flag keeps concurrent unregistration from freeing
 * a running callback.  Returns the value of the last callback run (0 if none).
 */
int
rte_eth_dev_callback_process(struct rte_eth_dev *dev,
	enum rte_eth_event_type event, void *ret_param)
{
	struct rte_eth_dev_callback *cb_lst;
	struct rte_eth_dev_callback dev_cb;
	int rc = 0;

	rte_spinlock_lock(&eth_dev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		/* Copy the entry so it can be used after the lock is dropped. */
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		if (ret_param != NULL)
			dev_cb.ret_param = ret_param;

		/* Run the user callback without holding the list lock. */
		rte_spinlock_unlock(&eth_dev_cb_lock);
		rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
				dev_cb.cb_arg, dev_cb.ret_param);
		rte_spinlock_lock(&eth_dev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&eth_dev_cb_lock);
	return rc;
}
4730
/* Final step of driver probing: announce the new port and mark it attached. */
void
rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
{
	/* NULL is tolerated so drivers may call this unconditionally. */
	if (dev == NULL)
		return;

	/*
	 * The NEW event fires before the state flips to ATTACHED, so
	 * NOTE(review): handlers observe the pre-attached state — confirm
	 * this ordering is relied upon before reordering.
	 */
	rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);

	dev->state = RTE_ETH_DEV_ATTACHED;
}
4741
/*
 * Apply an epoll control operation 'op' to the Rx interrupt vector of every
 * Rx queue of the port.  Per-queue failures are logged and skipped (the
 * loop continues and the function still returns 0); -EEXIST from
 * rte_intr_rx_ctl is not treated as a failure.
 */
int
rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
{
	uint32_t vec;
	struct rte_eth_dev *dev;
	struct rte_intr_handle *intr_handle;
	uint16_t qid;
	int rc;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->intr_handle) {
		RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
		return -ENOTSUP;
	}

	intr_handle = dev->intr_handle;
	if (!intr_handle->intr_vec) {
		RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
		return -EPERM;
	}

	for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
		vec = intr_handle->intr_vec[qid];
		rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
		/* Best effort: log and move on to the next queue. */
		if (rc && rc != -EEXIST) {
			RTE_ETHDEV_LOG(ERR,
				"p %u q %u rx ctl error op %d epfd %d vec %u\n",
				port_id, qid, op, epfd, vec);
		}
	}

	return 0;
}
4777
4778 int
4779 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
4780 {
4781         struct rte_intr_handle *intr_handle;
4782         struct rte_eth_dev *dev;
4783         unsigned int efd_idx;
4784         uint32_t vec;
4785         int fd;
4786
4787         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
4788         dev = &rte_eth_devices[port_id];
4789
4790         if (queue_id >= dev->data->nb_rx_queues) {
4791                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4792                 return -1;
4793         }
4794
4795         if (!dev->intr_handle) {
4796                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4797                 return -1;
4798         }
4799
4800         intr_handle = dev->intr_handle;
4801         if (!intr_handle->intr_vec) {
4802                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4803                 return -1;
4804         }
4805
4806         vec = intr_handle->intr_vec[queue_id];
4807         efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
4808                 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
4809         fd = intr_handle->efds[efd_idx];
4810
4811         return fd;
4812 }
4813
/*
 * Compose the canonical memzone name for a port/queue ring.  Returns the
 * snprintf() would-be length so callers can detect truncation against 'len'.
 */
static inline int
eth_dev_dma_mzone_name(char *name, size_t len, uint16_t port_id, uint16_t queue_id,
		const char *ring_name)
{
	return snprintf(name, len, "eth_p%d_q%d_%s", port_id, queue_id,
			ring_name);
}
4821
/*
 * Reserve an IOVA-contiguous memzone for a port/queue ring, or return the
 * existing zone of the same name when it already satisfies the requested
 * socket, size and alignment.  Returns NULL on failure (rte_errno set to
 * ENAMETOOLONG when the composed name does not fit).
 * NOTE(review): the alignment test assumes 'align' is a non-zero power of
 * two — align == 0 would make the mask all ones; confirm caller contract.
 */
const struct rte_memzone *
rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
			 uint16_t queue_id, size_t size, unsigned align,
			 int socket_id)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int rc;

	rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
			queue_id, ring_name);
	if (rc >= RTE_MEMZONE_NAMESIZE) {
		RTE_ETHDEV_LOG(ERR, "ring name too long\n");
		rte_errno = ENAMETOOLONG;
		return NULL;
	}

	/* Reuse an existing zone only when it meets all requested attributes. */
	mz = rte_memzone_lookup(z_name);
	if (mz) {
		if ((socket_id != SOCKET_ID_ANY && socket_id != mz->socket_id) ||
				size > mz->len ||
				((uintptr_t)mz->addr & (align - 1)) != 0) {
			RTE_ETHDEV_LOG(ERR,
				"memzone %s does not justify the requested attributes\n",
				mz->name);
			return NULL;
		}

		return mz;
	}

	return rte_memzone_reserve_aligned(z_name, size, socket_id,
			RTE_MEMZONE_IOVA_CONTIG, align);
}
4856
4857 int
4858 rte_eth_dma_zone_free(const struct rte_eth_dev *dev, const char *ring_name,
4859                 uint16_t queue_id)
4860 {
4861         char z_name[RTE_MEMZONE_NAMESIZE];
4862         const struct rte_memzone *mz;
4863         int rc = 0;
4864
4865         rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
4866                         queue_id, ring_name);
4867         if (rc >= RTE_MEMZONE_NAMESIZE) {
4868                 RTE_ETHDEV_LOG(ERR, "ring name too long\n");
4869                 return -ENAMETOOLONG;
4870         }
4871
4872         mz = rte_memzone_lookup(z_name);
4873         if (mz)
4874                 rc = rte_memzone_free(mz);
4875         else
4876                 rc = -ENOENT;
4877
4878         return rc;
4879 }
4880
/*
 * Generic helper for bus drivers to create an ethdev.  In the primary
 * process it allocates the port and its private data; in a secondary
 * process it attaches to the already-created port by name.  Runs the
 * optional bus-specific init, then the driver's ethdev_init, and finishes
 * probing on success.  On any failure the port is released.
 */
int
rte_eth_dev_create(struct rte_device *device, const char *name,
	size_t priv_data_size,
	ethdev_bus_specific_init ethdev_bus_specific_init,
	void *bus_init_params,
	ethdev_init_t ethdev_init, void *init_params)
{
	struct rte_eth_dev *ethdev;
	int retval;

	RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		ethdev = rte_eth_dev_allocate(name);
		if (!ethdev)
			return -ENODEV;

		/* Private data lives on the device's NUMA node. */
		if (priv_data_size) {
			ethdev->data->dev_private = rte_zmalloc_socket(
				name, priv_data_size, RTE_CACHE_LINE_SIZE,
				device->numa_node);

			if (!ethdev->data->dev_private) {
				RTE_ETHDEV_LOG(ERR,
					"failed to allocate private data\n");
				retval = -ENOMEM;
				goto probe_failed;
			}
		}
	} else {
		/* Secondary process: the primary must have created the port. */
		ethdev = rte_eth_dev_attach_secondary(name);
		if (!ethdev) {
			RTE_ETHDEV_LOG(ERR,
				"secondary process attach failed, ethdev doesn't exist\n");
			return  -ENODEV;
		}
	}

	ethdev->device = device;

	if (ethdev_bus_specific_init) {
		retval = ethdev_bus_specific_init(ethdev, bus_init_params);
		if (retval) {
			RTE_ETHDEV_LOG(ERR,
				"ethdev bus specific initialisation failed\n");
			goto probe_failed;
		}
	}

	retval = ethdev_init(ethdev, init_params);
	if (retval) {
		RTE_ETHDEV_LOG(ERR, "ethdev initialisation failed\n");
		goto probe_failed;
	}

	rte_eth_dev_probing_finish(ethdev);

	return retval;

probe_failed:
	/* Releasing the port also frees dev_private in the primary process. */
	rte_eth_dev_release_port(ethdev);
	return retval;
}
4944
4945 int
4946 rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
4947         ethdev_uninit_t ethdev_uninit)
4948 {
4949         int ret;
4950
4951         ethdev = rte_eth_dev_allocated(ethdev->data->name);
4952         if (!ethdev)
4953                 return -ENODEV;
4954
4955         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL);
4956
4957         ret = ethdev_uninit(ethdev);
4958         if (ret)
4959                 return ret;
4960
4961         return rte_eth_dev_release_port(ethdev);
4962 }
4963
/*
 * Control (add/modify/delete, per @op) the epoll event associated with
 * one Rx queue interrupt of a port.
 *
 * Returns 0 on success, negative errno on validation or control failure.
 */
int
rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
			  int epfd, int op, void *data)
{
	uint32_t vec;
	struct rte_eth_dev *dev;
	struct rte_intr_handle *intr_handle;
	int rc;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
		return -EINVAL;
	}

	/* The device must have registered an interrupt handle. */
	if (!dev->intr_handle) {
		RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
		return -ENOTSUP;
	}

	intr_handle = dev->intr_handle;
	if (!intr_handle->intr_vec) {
		RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
		return -EPERM;
	}

	/* Map the queue to its interrupt vector and forward the epoll op. */
	vec = intr_handle->intr_vec[queue_id];
	rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
	if (rc && rc != -EEXIST) {
		/* -EEXIST (event already registered) is tolerated. */
		RTE_ETHDEV_LOG(ERR,
			"p %u q %u rx ctl error op %d epfd %d vec %u\n",
			port_id, queue_id, op, epfd, vec);
		return rc;
	}

	return 0;
}
5003
5004 int
5005 rte_eth_dev_rx_intr_enable(uint16_t port_id,
5006                            uint16_t queue_id)
5007 {
5008         struct rte_eth_dev *dev;
5009         int ret;
5010
5011         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5012         dev = &rte_eth_devices[port_id];
5013
5014         ret = eth_dev_validate_rx_queue(dev, queue_id);
5015         if (ret != 0)
5016                 return ret;
5017
5018         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
5019         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id));
5020 }
5021
5022 int
5023 rte_eth_dev_rx_intr_disable(uint16_t port_id,
5024                             uint16_t queue_id)
5025 {
5026         struct rte_eth_dev *dev;
5027         int ret;
5028
5029         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5030         dev = &rte_eth_devices[port_id];
5031
5032         ret = eth_dev_validate_rx_queue(dev, queue_id);
5033         if (ret != 0)
5034                 return ret;
5035
5036         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
5037         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id));
5038 }
5039
5040
/*
 * Append a user callback to run after Rx burst on the given queue.
 * Returns the callback handle on success, or NULL with rte_errno set.
 */
const struct rte_eth_rxtx_callback *
rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
		rte_rx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	/* Feature compiled out: the rest of this function is dead code. */
	rte_errno = ENOTSUP;
	return NULL;
#endif
	struct rte_eth_dev *dev;

	/* check input parameters */
	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
		    queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
		rte_errno = EINVAL;
		return NULL;
	}
	dev = &rte_eth_devices[port_id];
	/* Hairpin queues bypass the SW burst path, so no callbacks there. */
	if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
		rte_errno = EINVAL;
		return NULL;
	}
	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

	if (cb == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	cb->fn.rx = fn;
	cb->param = user_param;

	/* The lock serialises writers only; data-plane readers are lock-free,
	 * hence the release stores below.
	 */
	rte_spinlock_lock(&eth_dev_rx_cb_lock);
	/* Add the callbacks in fifo order. */
	struct rte_eth_rxtx_callback *tail =
		rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];

	if (!tail) {
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(
			&rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
			cb, __ATOMIC_RELEASE);

	} else {
		while (tail->next)
			tail = tail->next;
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
	}
	rte_spinlock_unlock(&eth_dev_rx_cb_lock);

	return cb;
}
5097
/*
 * Prepend a user callback so it runs first after Rx burst on the queue.
 * Returns the callback handle on success, or NULL with rte_errno set.
 */
const struct rte_eth_rxtx_callback *
rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
		rte_rx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	/* Feature compiled out: the rest of this function is dead code. */
	rte_errno = ENOTSUP;
	return NULL;
#endif
	/* check input parameters */
	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
		queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
		rte_errno = EINVAL;
		return NULL;
	}

	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

	if (cb == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	cb->fn.rx = fn;
	cb->param = user_param;

	/* Lock serialises writers; readers traverse the list lock-free. */
	rte_spinlock_lock(&eth_dev_rx_cb_lock);
	/* Add the callbacks at first position */
	cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
	/* Stores to cb->fn, cb->param and cb->next should complete before
	 * cb is visible to data plane threads.
	 */
	__atomic_store_n(
		&rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
		cb, __ATOMIC_RELEASE);
	rte_spinlock_unlock(&eth_dev_rx_cb_lock);

	return cb;
}
5136
/*
 * Append a user callback to run before Tx burst on the given queue.
 * Returns the callback handle on success, or NULL with rte_errno set.
 */
const struct rte_eth_rxtx_callback *
rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
		rte_tx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	/* Feature compiled out: the rest of this function is dead code. */
	rte_errno = ENOTSUP;
	return NULL;
#endif
	struct rte_eth_dev *dev;

	/* check input parameters */
	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
		    queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
		rte_errno = EINVAL;
		return NULL;
	}

	dev = &rte_eth_devices[port_id];
	/* Hairpin queues bypass the SW burst path, so no callbacks there. */
	if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
		rte_errno = EINVAL;
		return NULL;
	}

	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

	if (cb == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	cb->fn.tx = fn;
	cb->param = user_param;

	/* Lock serialises writers; readers traverse the list lock-free. */
	rte_spinlock_lock(&eth_dev_tx_cb_lock);
	/* Add the callbacks in fifo order. */
	struct rte_eth_rxtx_callback *tail =
		rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];

	if (!tail) {
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(
			&rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id],
			cb, __ATOMIC_RELEASE);

	} else {
		while (tail->next)
			tail = tail->next;
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
	}
	rte_spinlock_unlock(&eth_dev_tx_cb_lock);

	return cb;
}
5195
/*
 * Unlink a previously added Rx callback from the given queue.
 * Note: the callback memory is NOT freed here (no free in this function);
 * a data-plane thread may still be executing it, so the caller is
 * responsible for releasing it safely afterwards.
 */
int
rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
		const struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	return -ENOTSUP;
#endif
	/* Check input parameters. */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (user_cb == NULL ||
			queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
		return -EINVAL;

	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_eth_rxtx_callback *cb;
	struct rte_eth_rxtx_callback **prev_cb;
	int ret = -EINVAL;	/* -EINVAL if user_cb is not on the list */

	rte_spinlock_lock(&eth_dev_rx_cb_lock);
	/* Walk with a pointer-to-link so unlinking is a single store. */
	prev_cb = &dev->post_rx_burst_cbs[queue_id];
	for (; *prev_cb != NULL; prev_cb = &cb->next) {
		cb = *prev_cb;
		if (cb == user_cb) {
			/* Remove the user cb from the callback list. */
			__atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
			ret = 0;
			break;
		}
	}
	rte_spinlock_unlock(&eth_dev_rx_cb_lock);

	return ret;
}
5229
/*
 * Unlink a previously added Tx callback from the given queue.
 * Note: the callback memory is NOT freed here (no free in this function);
 * a data-plane thread may still be executing it, so the caller is
 * responsible for releasing it safely afterwards.
 */
int
rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
		const struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	return -ENOTSUP;
#endif
	/* Check input parameters. */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (user_cb == NULL ||
			queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
		return -EINVAL;

	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret = -EINVAL;	/* -EINVAL if user_cb is not on the list */
	struct rte_eth_rxtx_callback *cb;
	struct rte_eth_rxtx_callback **prev_cb;

	rte_spinlock_lock(&eth_dev_tx_cb_lock);
	/* Walk with a pointer-to-link so unlinking is a single store. */
	prev_cb = &dev->pre_tx_burst_cbs[queue_id];
	for (; *prev_cb != NULL; prev_cb = &cb->next) {
		cb = *prev_cb;
		if (cb == user_cb) {
			/* Remove the user cb from the callback list. */
			__atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
			ret = 0;
			break;
		}
	}
	rte_spinlock_unlock(&eth_dev_tx_cb_lock);

	return ret;
}
5263
/*
 * Fill *qinfo with information about one configured Rx queue.
 * Validation order (and thus error precedence): port, queue index,
 * output pointer, queue set up, not hairpin.
 */
int
rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
	struct rte_eth_rxq_info *qinfo)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
		return -EINVAL;
	}

	if (qinfo == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Rx queue %u info to NULL\n",
			port_id, queue_id);
		return -EINVAL;
	}

	/* The queue must have been set up before it can be queried. */
	if (dev->data->rx_queues == NULL ||
			dev->data->rx_queues[queue_id] == NULL) {
		RTE_ETHDEV_LOG(ERR,
			       "Rx queue %"PRIu16" of device with port_id=%"
			       PRIu16" has not been setup\n",
			       queue_id, port_id);
		return -EINVAL;
	}

	/* Hairpin queues carry no regular queue info. */
	if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
			queue_id, port_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);

	/* Zero first so fields the driver does not set are well defined. */
	memset(qinfo, 0, sizeof(*qinfo));
	dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
	qinfo->queue_state = dev->data->rx_queue_state[queue_id];

	return 0;
}
5308
/*
 * Fill *qinfo with information about one configured Tx queue.
 * Validation order (and thus error precedence): port, queue index,
 * output pointer, queue set up, not hairpin.
 */
int
rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
	struct rte_eth_txq_info *qinfo)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
		return -EINVAL;
	}

	if (qinfo == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Tx queue %u info to NULL\n",
			port_id, queue_id);
		return -EINVAL;
	}

	/* The queue must have been set up before it can be queried. */
	if (dev->data->tx_queues == NULL ||
			dev->data->tx_queues[queue_id] == NULL) {
		RTE_ETHDEV_LOG(ERR,
			       "Tx queue %"PRIu16" of device with port_id=%"
			       PRIu16" has not been setup\n",
			       queue_id, port_id);
		return -EINVAL;
	}

	/* Hairpin queues carry no regular queue info. */
	if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
			queue_id, port_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);

	/* Zero first so fields the driver does not set are well defined. */
	memset(qinfo, 0, sizeof(*qinfo));
	dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
	qinfo->queue_state = dev->data->tx_queue_state[queue_id];

	return 0;
}
5353
5354 int
5355 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5356                           struct rte_eth_burst_mode *mode)
5357 {
5358         struct rte_eth_dev *dev;
5359
5360         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5361         dev = &rte_eth_devices[port_id];
5362
5363         if (queue_id >= dev->data->nb_rx_queues) {
5364                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
5365                 return -EINVAL;
5366         }
5367
5368         if (mode == NULL) {
5369                 RTE_ETHDEV_LOG(ERR,
5370                         "Cannot get ethdev port %u Rx queue %u burst mode to NULL\n",
5371                         port_id, queue_id);
5372                 return -EINVAL;
5373         }
5374
5375         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP);
5376         memset(mode, 0, sizeof(*mode));
5377         return eth_err(port_id,
5378                        dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode));
5379 }
5380
5381 int
5382 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5383                           struct rte_eth_burst_mode *mode)
5384 {
5385         struct rte_eth_dev *dev;
5386
5387         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5388         dev = &rte_eth_devices[port_id];
5389
5390         if (queue_id >= dev->data->nb_tx_queues) {
5391                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
5392                 return -EINVAL;
5393         }
5394
5395         if (mode == NULL) {
5396                 RTE_ETHDEV_LOG(ERR,
5397                         "Cannot get ethdev port %u Tx queue %u burst mode to NULL\n",
5398                         port_id, queue_id);
5399                 return -EINVAL;
5400         }
5401
5402         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP);
5403         memset(mode, 0, sizeof(*mode));
5404         return eth_err(port_id,
5405                        dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode));
5406 }
5407
/*
 * Obtain the power-monitor condition for one Rx queue via the driver op.
 * Note the driver op receives the queue object, not the ethdev.
 */
int
rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id,
		struct rte_power_monitor_cond *pmc)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
		return -EINVAL;
	}

	if (pmc == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u Rx queue %u power monitor condition to NULL\n",
			port_id, queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_monitor_addr, -ENOTSUP);
	/* NOTE(review): rx_queues[queue_id] is not checked for NULL here,
	 * unlike rte_eth_rx_queue_info_get() - presumably the queue must be
	 * set up first; confirm against callers.
	 */
	return eth_err(port_id,
		dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id], pmc));
}
5433
5434 int
5435 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
5436                              struct rte_ether_addr *mc_addr_set,
5437                              uint32_t nb_mc_addr)
5438 {
5439         struct rte_eth_dev *dev;
5440
5441         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5442         dev = &rte_eth_devices[port_id];
5443
5444         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
5445         return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
5446                                                 mc_addr_set, nb_mc_addr));
5447 }
5448
5449 int
5450 rte_eth_timesync_enable(uint16_t port_id)
5451 {
5452         struct rte_eth_dev *dev;
5453
5454         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5455         dev = &rte_eth_devices[port_id];
5456
5457         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
5458         return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
5459 }
5460
5461 int
5462 rte_eth_timesync_disable(uint16_t port_id)
5463 {
5464         struct rte_eth_dev *dev;
5465
5466         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5467         dev = &rte_eth_devices[port_id];
5468
5469         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
5470         return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
5471 }
5472
5473 int
5474 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
5475                                    uint32_t flags)
5476 {
5477         struct rte_eth_dev *dev;
5478
5479         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5480         dev = &rte_eth_devices[port_id];
5481
5482         if (timestamp == NULL) {
5483                 RTE_ETHDEV_LOG(ERR,
5484                         "Cannot read ethdev port %u Rx timestamp to NULL\n",
5485                         port_id);
5486                 return -EINVAL;
5487         }
5488
5489         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
5490         return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
5491                                 (dev, timestamp, flags));
5492 }
5493
5494 int
5495 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
5496                                    struct timespec *timestamp)
5497 {
5498         struct rte_eth_dev *dev;
5499
5500         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5501         dev = &rte_eth_devices[port_id];
5502
5503         if (timestamp == NULL) {
5504                 RTE_ETHDEV_LOG(ERR,
5505                         "Cannot read ethdev port %u Tx timestamp to NULL\n",
5506                         port_id);
5507                 return -EINVAL;
5508         }
5509
5510         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
5511         return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
5512                                 (dev, timestamp));
5513 }
5514
5515 int
5516 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
5517 {
5518         struct rte_eth_dev *dev;
5519
5520         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5521         dev = &rte_eth_devices[port_id];
5522
5523         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
5524         return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, delta));
5525 }
5526
5527 int
5528 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
5529 {
5530         struct rte_eth_dev *dev;
5531
5532         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5533         dev = &rte_eth_devices[port_id];
5534
5535         if (timestamp == NULL) {
5536                 RTE_ETHDEV_LOG(ERR,
5537                         "Cannot read ethdev port %u timesync time to NULL\n",
5538                         port_id);
5539                 return -EINVAL;
5540         }
5541
5542         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
5543         return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
5544                                                                 timestamp));
5545 }
5546
5547 int
5548 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
5549 {
5550         struct rte_eth_dev *dev;
5551
5552         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5553         dev = &rte_eth_devices[port_id];
5554
5555         if (timestamp == NULL) {
5556                 RTE_ETHDEV_LOG(ERR,
5557                         "Cannot write ethdev port %u timesync from NULL time\n",
5558                         port_id);
5559                 return -EINVAL;
5560         }
5561
5562         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
5563         return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
5564                                                                 timestamp));
5565 }
5566
5567 int
5568 rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
5569 {
5570         struct rte_eth_dev *dev;
5571
5572         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5573         dev = &rte_eth_devices[port_id];
5574
5575         if (clock == NULL) {
5576                 RTE_ETHDEV_LOG(ERR, "Cannot read ethdev port %u clock to NULL\n",
5577                         port_id);
5578                 return -EINVAL;
5579         }
5580
5581         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP);
5582         return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock));
5583 }
5584
5585 int
5586 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
5587 {
5588         struct rte_eth_dev *dev;
5589
5590         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5591         dev = &rte_eth_devices[port_id];
5592
5593         if (info == NULL) {
5594                 RTE_ETHDEV_LOG(ERR,
5595                         "Cannot get ethdev port %u register info to NULL\n",
5596                         port_id);
5597                 return -EINVAL;
5598         }
5599
5600         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
5601         return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
5602 }
5603
5604 int
5605 rte_eth_dev_get_eeprom_length(uint16_t port_id)
5606 {
5607         struct rte_eth_dev *dev;
5608
5609         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5610         dev = &rte_eth_devices[port_id];
5611
5612         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
5613         return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
5614 }
5615
5616 int
5617 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
5618 {
5619         struct rte_eth_dev *dev;
5620
5621         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5622         dev = &rte_eth_devices[port_id];
5623
5624         if (info == NULL) {
5625                 RTE_ETHDEV_LOG(ERR,
5626                         "Cannot get ethdev port %u EEPROM info to NULL\n",
5627                         port_id);
5628                 return -EINVAL;
5629         }
5630
5631         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
5632         return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
5633 }
5634
5635 int
5636 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
5637 {
5638         struct rte_eth_dev *dev;
5639
5640         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5641         dev = &rte_eth_devices[port_id];
5642
5643         if (info == NULL) {
5644                 RTE_ETHDEV_LOG(ERR,
5645                         "Cannot set ethdev port %u EEPROM from NULL info\n",
5646                         port_id);
5647                 return -EINVAL;
5648         }
5649
5650         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
5651         return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
5652 }
5653
5654 int
5655 rte_eth_dev_get_module_info(uint16_t port_id,
5656                             struct rte_eth_dev_module_info *modinfo)
5657 {
5658         struct rte_eth_dev *dev;
5659
5660         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5661         dev = &rte_eth_devices[port_id];
5662
5663         if (modinfo == NULL) {
5664                 RTE_ETHDEV_LOG(ERR,
5665                         "Cannot get ethdev port %u EEPROM module info to NULL\n",
5666                         port_id);
5667                 return -EINVAL;
5668         }
5669
5670         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP);
5671         return (*dev->dev_ops->get_module_info)(dev, modinfo);
5672 }
5673
5674 int
5675 rte_eth_dev_get_module_eeprom(uint16_t port_id,
5676                               struct rte_dev_eeprom_info *info)
5677 {
5678         struct rte_eth_dev *dev;
5679
5680         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5681         dev = &rte_eth_devices[port_id];
5682
5683         if (info == NULL) {
5684                 RTE_ETHDEV_LOG(ERR,
5685                         "Cannot get ethdev port %u module EEPROM info to NULL\n",
5686                         port_id);
5687                 return -EINVAL;
5688         }
5689
5690         if (info->data == NULL) {
5691                 RTE_ETHDEV_LOG(ERR,
5692                         "Cannot get ethdev port %u module EEPROM data to NULL\n",
5693                         port_id);
5694                 return -EINVAL;
5695         }
5696
5697         if (info->length == 0) {
5698                 RTE_ETHDEV_LOG(ERR,
5699                         "Cannot get ethdev port %u module EEPROM to data with zero size\n",
5700                         port_id);
5701                 return -EINVAL;
5702         }
5703
5704         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP);
5705         return (*dev->dev_ops->get_module_eeprom)(dev, info);
5706 }
5707
/*
 * Fill *dcb_info with the port's DCB configuration via the driver op.
 * Note: *dcb_info is zeroed before the ops-support check, so callers
 * see a cleared structure even on -ENOTSUP.
 */
int
rte_eth_dev_get_dcb_info(uint16_t port_id,
			     struct rte_eth_dcb_info *dcb_info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dcb_info == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u DCB info to NULL\n",
			port_id);
		return -EINVAL;
	}

	memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
}
5729
5730 static void
5731 eth_dev_adjust_nb_desc(uint16_t *nb_desc,
5732                 const struct rte_eth_desc_lim *desc_lim)
5733 {
5734         if (desc_lim->nb_align != 0)
5735                 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
5736
5737         if (desc_lim->nb_max != 0)
5738                 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
5739
5740         *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
5741 }
5742
5743 int
5744 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
5745                                  uint16_t *nb_rx_desc,
5746                                  uint16_t *nb_tx_desc)
5747 {
5748         struct rte_eth_dev_info dev_info;
5749         int ret;
5750
5751         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5752
5753         ret = rte_eth_dev_info_get(port_id, &dev_info);
5754         if (ret != 0)
5755                 return ret;
5756
5757         if (nb_rx_desc != NULL)
5758                 eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
5759
5760         if (nb_tx_desc != NULL)
5761                 eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
5762
5763         return 0;
5764 }
5765
5766 int
5767 rte_eth_dev_hairpin_capability_get(uint16_t port_id,
5768                                    struct rte_eth_hairpin_cap *cap)
5769 {
5770         struct rte_eth_dev *dev;
5771
5772         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5773         dev = &rte_eth_devices[port_id];
5774
5775         if (cap == NULL) {
5776                 RTE_ETHDEV_LOG(ERR,
5777                         "Cannot get ethdev port %u hairpin capability to NULL\n",
5778                         port_id);
5779                 return -EINVAL;
5780         }
5781
5782         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP);
5783         memset(cap, 0, sizeof(*cap));
5784         return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap));
5785 }
5786
5787 int
5788 rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
5789 {
5790         if (dev->data->rx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN)
5791                 return 1;
5792         return 0;
5793 }
5794
5795 int
5796 rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
5797 {
5798         if (dev->data->tx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN)
5799                 return 1;
5800         return 0;
5801 }
5802
/*
 * Ask the driver whether the named mempool ops are supported for this port.
 * A driver without the pool_ops_supported op is treated as supporting all
 * pools (returns 1).
 */
int
rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (pool == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot test ethdev port %u mempool operation from NULL pool\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->pool_ops_supported == NULL)
		return 1; /* all pools are supported */

	return (*dev->dev_ops->pool_ops_supported)(dev, pool);
}
5823
/**
 * A set of values to describe the possible states of a switch domain.
 */
enum rte_eth_switch_domain_state {
	RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
	RTE_ETH_SWITCH_DOMAIN_ALLOCATED
};

/**
 * Array of switch domains available for allocation. Array is sized to
 * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
 * ethdev ports in a single process.
 *
 * NOTE(review): no lock guards this array here - presumably alloc/free are
 * control-path only; confirm with callers.
 */
static struct rte_eth_dev_switch {
	enum rte_eth_switch_domain_state state;
} eth_dev_switch_domains[RTE_MAX_ETHPORTS];
5840
5841 int
5842 rte_eth_switch_domain_alloc(uint16_t *domain_id)
5843 {
5844         uint16_t i;
5845
5846         *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
5847
5848         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
5849                 if (eth_dev_switch_domains[i].state ==
5850                         RTE_ETH_SWITCH_DOMAIN_UNUSED) {
5851                         eth_dev_switch_domains[i].state =
5852                                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
5853                         *domain_id = i;
5854                         return 0;
5855                 }
5856         }
5857
5858         return -ENOSPC;
5859 }
5860
5861 int
5862 rte_eth_switch_domain_free(uint16_t domain_id)
5863 {
5864         if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
5865                 domain_id >= RTE_MAX_ETHPORTS)
5866                 return -EINVAL;
5867
5868         if (eth_dev_switch_domains[domain_id].state !=
5869                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
5870                 return -EINVAL;
5871
5872         eth_dev_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;
5873
5874         return 0;
5875 }
5876
5877 static int
5878 eth_dev_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
5879 {
5880         int state;
5881         struct rte_kvargs_pair *pair;
5882         char *letter;
5883
5884         arglist->str = strdup(str_in);
5885         if (arglist->str == NULL)
5886                 return -ENOMEM;
5887
5888         letter = arglist->str;
5889         state = 0;
5890         arglist->count = 0;
5891         pair = &arglist->pairs[0];
5892         while (1) {
5893                 switch (state) {
5894                 case 0: /* Initial */
5895                         if (*letter == '=')
5896                                 return -EINVAL;
5897                         else if (*letter == '\0')
5898                                 return 0;
5899
5900                         state = 1;
5901                         pair->key = letter;
5902                         /* fall-thru */
5903
5904                 case 1: /* Parsing key */
5905                         if (*letter == '=') {
5906                                 *letter = '\0';
5907                                 pair->value = letter + 1;
5908                                 state = 2;
5909                         } else if (*letter == ',' || *letter == '\0')
5910                                 return -EINVAL;
5911                         break;
5912
5913
5914                 case 2: /* Parsing value */
5915                         if (*letter == '[')
5916                                 state = 3;
5917                         else if (*letter == ',') {
5918                                 *letter = '\0';
5919                                 arglist->count++;
5920                                 pair = &arglist->pairs[arglist->count];
5921                                 state = 0;
5922                         } else if (*letter == '\0') {
5923                                 letter--;
5924                                 arglist->count++;
5925                                 pair = &arglist->pairs[arglist->count];
5926                                 state = 0;
5927                         }
5928                         break;
5929
5930                 case 3: /* Parsing list */
5931                         if (*letter == ']')
5932                                 state = 2;
5933                         else if (*letter == '\0')
5934                                 return -EINVAL;
5935                         break;
5936                 }
5937                 letter++;
5938         }
5939 }
5940
5941 int
5942 rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
5943 {
5944         struct rte_kvargs args;
5945         struct rte_kvargs_pair *pair;
5946         unsigned int i;
5947         int result = 0;
5948
5949         memset(eth_da, 0, sizeof(*eth_da));
5950
5951         result = eth_dev_devargs_tokenise(&args, dargs);
5952         if (result < 0)
5953                 goto parse_cleanup;
5954
5955         for (i = 0; i < args.count; i++) {
5956                 pair = &args.pairs[i];
5957                 if (strcmp("representor", pair->key) == 0) {
5958                         if (eth_da->type != RTE_ETH_REPRESENTOR_NONE) {
5959                                 RTE_LOG(ERR, EAL, "duplicated representor key: %s\n",
5960                                         dargs);
5961                                 result = -1;
5962                                 goto parse_cleanup;
5963                         }
5964                         result = rte_eth_devargs_parse_representor_ports(
5965                                         pair->value, eth_da);
5966                         if (result < 0)
5967                                 goto parse_cleanup;
5968                 }
5969         }
5970
5971 parse_cleanup:
5972         if (args.str)
5973                 free(args.str);
5974
5975         return result;
5976 }
5977
/**
 * Translate a (controller, pf, representor_port) triple into the PMD's
 * internal representor ID for @p ethdev, by matching it against the ranges
 * reported by the PMD via rte_eth_representor_info_get().
 *
 * @param controller Controller index, or -1 to default to the PMD's own.
 * @param pf PF index, or -1 to default to the PMD's own.
 * @return 0 on success (*repr_id set), -EINVAL if repr_id is NULL,
 *   -ENOMEM on allocation failure, -ENOENT if no range matches, or a
 *   negative value propagated from the info query.
 */
int
rte_eth_representor_id_get(const struct rte_eth_dev *ethdev,
			   enum rte_eth_representor_type type,
			   int controller, int pf, int representor_port,
			   uint16_t *repr_id)
{
	int ret, n, count;
	uint32_t i;
	struct rte_eth_representor_info *info = NULL;
	size_t size;

	/* Nothing to resolve for a non-representor port. */
	if (type == RTE_ETH_REPRESENTOR_NONE)
		return 0;
	if (repr_id == NULL)
		return -EINVAL;

	/* Get PMD representor range info. */
	ret = rte_eth_representor_info_get(ethdev->data->port_id, NULL);
	if (ret == -ENOTSUP && type == RTE_ETH_REPRESENTOR_VF &&
	    controller == -1 && pf == -1) {
		/* Direct mapping for legacy VF representor. */
		*repr_id = representor_port;
		return 0;
	} else if (ret < 0) {
		return ret;
	}
	/* First query (info == NULL) returned the number of ranges. */
	n = ret;
	size = sizeof(*info) + n * sizeof(info->ranges[0]);
	info = calloc(1, size);
	if (info == NULL)
		return -ENOMEM;
	info->nb_ranges_alloc = n;
	ret = rte_eth_representor_info_get(ethdev->data->port_id, info);
	if (ret < 0)
		goto out;

	/* Default controller and pf to caller. */
	if (controller == -1)
		controller = info->controller;
	if (pf == -1)
		pf = info->pf;

	/* Locate representor ID. */
	ret = -ENOENT;
	for (i = 0; i < info->nb_ranges; ++i) {
		if (info->ranges[i].type != type)
			continue;
		if (info->ranges[i].controller != controller)
			continue;
		/* Skip (and warn about) ranges the PMD reported backwards.
		 * NOTE(review): this warning goes to the EAL logtype while the
		 * rest of the file uses RTE_ETHDEV_LOG — looks unintentional.
		 */
		if (info->ranges[i].id_end < info->ranges[i].id_base) {
			RTE_LOG(WARNING, EAL, "Port %hu invalid representor ID Range %u - %u, entry %d\n",
				ethdev->data->port_id, info->ranges[i].id_base,
				info->ranges[i].id_end, i);
			continue;

		}
		/* Number of IDs in this (inclusive) range. */
		count = info->ranges[i].id_end - info->ranges[i].id_base + 1;
		switch (info->ranges[i].type) {
		case RTE_ETH_REPRESENTOR_PF:
			/* PF range: index by pf offset within the range. */
			if (pf < info->ranges[i].pf ||
			    pf >= info->ranges[i].pf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
				   (pf - info->ranges[i].pf);
			ret = 0;
			goto out;
		case RTE_ETH_REPRESENTOR_VF:
			/* VF range: must match pf, then index by vf offset. */
			if (info->ranges[i].pf != pf)
				continue;
			if (representor_port < info->ranges[i].vf ||
			    representor_port >= info->ranges[i].vf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
				   (representor_port - info->ranges[i].vf);
			ret = 0;
			goto out;
		case RTE_ETH_REPRESENTOR_SF:
			/* SF range: must match pf, then index by sf offset. */
			if (info->ranges[i].pf != pf)
				continue;
			if (representor_port < info->ranges[i].sf ||
			    representor_port >= info->ranges[i].sf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
			      (representor_port - info->ranges[i].sf);
			ret = 0;
			goto out;
		default:
			break;
		}
	}
out:
	free(info);
	return ret;
}
6072
6073 static int
6074 eth_dev_handle_port_list(const char *cmd __rte_unused,
6075                 const char *params __rte_unused,
6076                 struct rte_tel_data *d)
6077 {
6078         int port_id;
6079
6080         rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
6081         RTE_ETH_FOREACH_DEV(port_id)
6082                 rte_tel_data_add_array_int(d, port_id);
6083         return 0;
6084 }
6085
6086 static void
6087 eth_dev_add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats,
6088                 const char *stat_name)
6089 {
6090         int q;
6091         struct rte_tel_data *q_data = rte_tel_data_alloc();
6092         rte_tel_data_start_array(q_data, RTE_TEL_U64_VAL);
6093         for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++)
6094                 rte_tel_data_add_array_u64(q_data, q_stats[q]);
6095         rte_tel_data_add_dict_container(d, stat_name, q_data, 0);
6096 }
6097
6098 #define ADD_DICT_STAT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s)
6099
6100 static int
6101 eth_dev_handle_port_stats(const char *cmd __rte_unused,
6102                 const char *params,
6103                 struct rte_tel_data *d)
6104 {
6105         struct rte_eth_stats stats;
6106         int port_id, ret;
6107
6108         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
6109                 return -1;
6110
6111         port_id = atoi(params);
6112         if (!rte_eth_dev_is_valid_port(port_id))
6113                 return -1;
6114
6115         ret = rte_eth_stats_get(port_id, &stats);
6116         if (ret < 0)
6117                 return -1;
6118
6119         rte_tel_data_start_dict(d);
6120         ADD_DICT_STAT(stats, ipackets);
6121         ADD_DICT_STAT(stats, opackets);
6122         ADD_DICT_STAT(stats, ibytes);
6123         ADD_DICT_STAT(stats, obytes);
6124         ADD_DICT_STAT(stats, imissed);
6125         ADD_DICT_STAT(stats, ierrors);
6126         ADD_DICT_STAT(stats, oerrors);
6127         ADD_DICT_STAT(stats, rx_nombuf);
6128         eth_dev_add_port_queue_stats(d, stats.q_ipackets, "q_ipackets");
6129         eth_dev_add_port_queue_stats(d, stats.q_opackets, "q_opackets");
6130         eth_dev_add_port_queue_stats(d, stats.q_ibytes, "q_ibytes");
6131         eth_dev_add_port_queue_stats(d, stats.q_obytes, "q_obytes");
6132         eth_dev_add_port_queue_stats(d, stats.q_errors, "q_errors");
6133
6134         return 0;
6135 }
6136
6137 static int
6138 eth_dev_handle_port_xstats(const char *cmd __rte_unused,
6139                 const char *params,
6140                 struct rte_tel_data *d)
6141 {
6142         struct rte_eth_xstat *eth_xstats;
6143         struct rte_eth_xstat_name *xstat_names;
6144         int port_id, num_xstats;
6145         int i, ret;
6146         char *end_param;
6147
6148         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
6149                 return -1;
6150
6151         port_id = strtoul(params, &end_param, 0);
6152         if (*end_param != '\0')
6153                 RTE_ETHDEV_LOG(NOTICE,
6154                         "Extra parameters passed to ethdev telemetry command, ignoring");
6155         if (!rte_eth_dev_is_valid_port(port_id))
6156                 return -1;
6157
6158         num_xstats = rte_eth_xstats_get(port_id, NULL, 0);
6159         if (num_xstats < 0)
6160                 return -1;
6161
6162         /* use one malloc for both names and stats */
6163         eth_xstats = malloc((sizeof(struct rte_eth_xstat) +
6164                         sizeof(struct rte_eth_xstat_name)) * num_xstats);
6165         if (eth_xstats == NULL)
6166                 return -1;
6167         xstat_names = (void *)&eth_xstats[num_xstats];
6168
6169         ret = rte_eth_xstats_get_names(port_id, xstat_names, num_xstats);
6170         if (ret < 0 || ret > num_xstats) {
6171                 free(eth_xstats);
6172                 return -1;
6173         }
6174
6175         ret = rte_eth_xstats_get(port_id, eth_xstats, num_xstats);
6176         if (ret < 0 || ret > num_xstats) {
6177                 free(eth_xstats);
6178                 return -1;
6179         }
6180
6181         rte_tel_data_start_dict(d);
6182         for (i = 0; i < num_xstats; i++)
6183                 rte_tel_data_add_dict_u64(d, xstat_names[i].name,
6184                                 eth_xstats[i].value);
6185         return 0;
6186 }
6187
6188 static int
6189 eth_dev_handle_port_link_status(const char *cmd __rte_unused,
6190                 const char *params,
6191                 struct rte_tel_data *d)
6192 {
6193         static const char *status_str = "status";
6194         int ret, port_id;
6195         struct rte_eth_link link;
6196         char *end_param;
6197
6198         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
6199                 return -1;
6200
6201         port_id = strtoul(params, &end_param, 0);
6202         if (*end_param != '\0')
6203                 RTE_ETHDEV_LOG(NOTICE,
6204                         "Extra parameters passed to ethdev telemetry command, ignoring");
6205         if (!rte_eth_dev_is_valid_port(port_id))
6206                 return -1;
6207
6208         ret = rte_eth_link_get_nowait(port_id, &link);
6209         if (ret < 0)
6210                 return -1;
6211
6212         rte_tel_data_start_dict(d);
6213         if (!link.link_status) {
6214                 rte_tel_data_add_dict_string(d, status_str, "DOWN");
6215                 return 0;
6216         }
6217         rte_tel_data_add_dict_string(d, status_str, "UP");
6218         rte_tel_data_add_dict_u64(d, "speed", link.link_speed);
6219         rte_tel_data_add_dict_string(d, "duplex",
6220                         (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
6221                                 "full-duplex" : "half-duplex");
6222         return 0;
6223 }
6224
/*
 * Forward hairpin queue information to the peer port's PMD so it can
 * configure its side of the hairpin queue pair.
 *
 * peer_info is mandatory; cur_info is passed through unchecked and may be
 * optional depending on the PMD (note: the original comment said "current
 * queue information is not mandatory", but the NULL check below is on
 * peer_info).
 */
int
rte_eth_hairpin_queue_peer_update(uint16_t peer_port, uint16_t peer_queue,
				  struct rte_hairpin_peer_info *cur_info,
				  struct rte_hairpin_peer_info *peer_info,
				  uint32_t direction)
{
	struct rte_eth_dev *dev;

	/* The peer's information must be supplied. */
	if (peer_info == NULL)
		return -EINVAL;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[peer_port];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_update,
				-ENOTSUP);

	return (*dev->dev_ops->hairpin_queue_peer_update)(dev, peer_queue,
					cur_info, peer_info, direction);
}
6245
6246 int
6247 rte_eth_hairpin_queue_peer_bind(uint16_t cur_port, uint16_t cur_queue,
6248                                 struct rte_hairpin_peer_info *peer_info,
6249                                 uint32_t direction)
6250 {
6251         struct rte_eth_dev *dev;
6252
6253         if (peer_info == NULL)
6254                 return -EINVAL;
6255
6256         /* No need to check the validity again. */
6257         dev = &rte_eth_devices[cur_port];
6258         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_bind,
6259                                 -ENOTSUP);
6260
6261         return (*dev->dev_ops->hairpin_queue_peer_bind)(dev, cur_queue,
6262                                                         peer_info, direction);
6263 }
6264
6265 int
6266 rte_eth_hairpin_queue_peer_unbind(uint16_t cur_port, uint16_t cur_queue,
6267                                   uint32_t direction)
6268 {
6269         struct rte_eth_dev *dev;
6270
6271         /* No need to check the validity again. */
6272         dev = &rte_eth_devices[cur_port];
6273         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_unbind,
6274                                 -ENOTSUP);
6275
6276         return (*dev->dev_ops->hairpin_queue_peer_unbind)(dev, cur_queue,
6277                                                           direction);
6278 }
6279
6280 int
6281 rte_eth_representor_info_get(uint16_t port_id,
6282                              struct rte_eth_representor_info *info)
6283 {
6284         struct rte_eth_dev *dev;
6285
6286         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6287         dev = &rte_eth_devices[port_id];
6288
6289         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->representor_info_get, -ENOTSUP);
6290         return eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev, info));
6291 }
6292
6293 RTE_LOG_REGISTER_DEFAULT(rte_eth_dev_logtype, INFO);
6294
/* Register the ethdev telemetry endpoints at shared-object load time. */
RTE_INIT(ethdev_init_telemetry)
{
	rte_telemetry_register_cmd("/ethdev/list", eth_dev_handle_port_list,
			"Returns list of available ethdev ports. Takes no parameters");
	rte_telemetry_register_cmd("/ethdev/stats", eth_dev_handle_port_stats,
			"Returns the common stats for a port. Parameters: int port_id");
	rte_telemetry_register_cmd("/ethdev/xstats", eth_dev_handle_port_xstats,
			"Returns the extended stats for a port. Parameters: int port_id");
	rte_telemetry_register_cmd("/ethdev/link_status",
			eth_dev_handle_port_link_status,
			"Returns the link status for a port. Parameters: int port_id");
}