ethdev: fix using Rx split config before null check
lib/librte_ethdev/rte_ethdev.c (dpdk.git)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4
5 #include <sys/types.h>
6 #include <sys/queue.h>
7 #include <ctype.h>
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <string.h>
11 #include <stdarg.h>
12 #include <errno.h>
13 #include <stdbool.h>
14 #include <stdint.h>
15 #include <inttypes.h>
16 #include <netinet/in.h>
17
18 #include <rte_byteorder.h>
19 #include <rte_log.h>
20 #include <rte_debug.h>
21 #include <rte_interrupts.h>
22 #include <rte_memory.h>
23 #include <rte_memcpy.h>
24 #include <rte_memzone.h>
25 #include <rte_launch.h>
26 #include <rte_eal.h>
27 #include <rte_per_lcore.h>
28 #include <rte_lcore.h>
29 #include <rte_branch_prediction.h>
30 #include <rte_common.h>
31 #include <rte_mempool.h>
32 #include <rte_malloc.h>
33 #include <rte_mbuf.h>
34 #include <rte_errno.h>
35 #include <rte_spinlock.h>
36 #include <rte_string_fns.h>
37 #include <rte_kvargs.h>
38 #include <rte_class.h>
39 #include <rte_ether.h>
40 #include <rte_telemetry.h>
41
42 #include "rte_ethdev_trace.h"
43 #include "rte_ethdev.h"
44 #include "rte_ethdev_driver.h"
45 #include "ethdev_profile.h"
46 #include "ethdev_private.h"
47
48 static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
49 struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
50
51 /* spinlock for eth device callbacks */
52 static rte_spinlock_t eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
53
54 /* spinlock for add/remove rx callbacks */
55 static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;
56
57 /* spinlock for add/remove tx callbacks */
58 static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;
59
60 /* spinlock for shared data allocation */
61 static rte_spinlock_t eth_dev_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
62
63 /* store statistics names and their offsets in the stats structure */
64 struct rte_eth_xstats_name_off {
65         char name[RTE_ETH_XSTATS_NAME_SIZE];
66         unsigned offset;
67 };
68
69 /* Shared memory between primary and secondary processes. */
70 static struct {
71         uint64_t next_owner_id;
72         rte_spinlock_t ownership_lock;
73         struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
74 } *eth_dev_shared_data;
75
76 static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = {
77         {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
78         {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
79         {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
80         {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
81         {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
82         {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
83         {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
84         {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
85                 rx_nombuf)},
86 };
87
88 #define RTE_NB_STATS RTE_DIM(eth_dev_stats_strings)
89
90 static const struct rte_eth_xstats_name_off eth_dev_rxq_stats_strings[] = {
91         {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
92         {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
93         {"errors", offsetof(struct rte_eth_stats, q_errors)},
94 };
95
96 #define RTE_NB_RXQ_STATS RTE_DIM(eth_dev_rxq_stats_strings)
97
98 static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
99         {"packets", offsetof(struct rte_eth_stats, q_opackets)},
100         {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
101 };
102 #define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)
103
104 #define RTE_RX_OFFLOAD_BIT2STR(_name)   \
105         { DEV_RX_OFFLOAD_##_name, #_name }
106
107 #define RTE_ETH_RX_OFFLOAD_BIT2STR(_name)       \
108         { RTE_ETH_RX_OFFLOAD_##_name, #_name }
109
110 static const struct {
111         uint64_t offload;
112         const char *name;
113 } eth_dev_rx_offload_names[] = {
114         RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
115         RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
116         RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
117         RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
118         RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
119         RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
120         RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
121         RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
122         RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
123         RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
124         RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
125         RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
126         RTE_RX_OFFLOAD_BIT2STR(SCATTER),
127         RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
128         RTE_RX_OFFLOAD_BIT2STR(SECURITY),
129         RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
130         RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
131         RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
132         RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
133         RTE_ETH_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
134 };
135
136 #undef RTE_RX_OFFLOAD_BIT2STR
137 #undef RTE_ETH_RX_OFFLOAD_BIT2STR
138
139 #define RTE_TX_OFFLOAD_BIT2STR(_name)   \
140         { DEV_TX_OFFLOAD_##_name, #_name }
141
142 static const struct {
143         uint64_t offload;
144         const char *name;
145 } eth_dev_tx_offload_names[] = {
146         RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
147         RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
148         RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
149         RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
150         RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
151         RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
152         RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
153         RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
154         RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
155         RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
156         RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
157         RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
158         RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
159         RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
160         RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
161         RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
162         RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
163         RTE_TX_OFFLOAD_BIT2STR(SECURITY),
164         RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
165         RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
166         RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
167         RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
168 };
169
170 #undef RTE_TX_OFFLOAD_BIT2STR
171
172 /**
173  * The user application callback description.
174  *
175  * It contains callback address to be registered by user application,
176  * the pointer to the parameters for callback, and the event type.
177  */
178 struct rte_eth_dev_callback {
179         TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
180         rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
181         void *cb_arg;                           /**< Parameter for callback */
182         void *ret_param;                        /**< Return parameter */
183         enum rte_eth_event_type event;          /**< Interrupt event type */
184         uint32_t active;                        /**< Callback is executing */
185 };
186
187 enum {
188         STAT_QMAP_TX = 0,
189         STAT_QMAP_RX
190 };
191
192 int
193 rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
194 {
195         int ret;
196         struct rte_devargs devargs = {.args = NULL};
197         const char *bus_param_key;
198         char *bus_str = NULL;
199         char *cls_str = NULL;
200         int str_size;
201
202         memset(iter, 0, sizeof(*iter));
203
204         /*
205          * The devargs string may use various syntaxes:
206          *   - 0000:08:00.0,representor=[1-3]
207          *   - pci:0000:06:00.0,representor=[0,5]
208          *   - class=eth,mac=00:11:22:33:44:55
209          * A new syntax is in development (not yet supported):
210          *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
211          */
212
213         /*
214          * Handle pure class filter (i.e. without any bus-level argument),
215          * from future new syntax.
216          * rte_devargs_parse() is not yet supporting the new syntax,
217          * that's why this simple case is temporarily parsed here.
218          */
219 #define iter_anybus_str "class=eth,"
220         if (strncmp(devargs_str, iter_anybus_str,
221                         strlen(iter_anybus_str)) == 0) {
222                 iter->cls_str = devargs_str + strlen(iter_anybus_str);
223                 goto end;
224         }
225
226         /* Split bus, device and parameters. */
227         ret = rte_devargs_parse(&devargs, devargs_str);
228         if (ret != 0)
229                 goto error;
230
231         /*
232          * Assume parameters of old syntax can match only at ethdev level.
233          * Extra parameters will be ignored, thanks to "+" prefix.
234          */
235         str_size = strlen(devargs.args) + 2;
236         cls_str = malloc(str_size);
237         if (cls_str == NULL) {
238                 ret = -ENOMEM;
239                 goto error;
240         }
241         ret = snprintf(cls_str, str_size, "+%s", devargs.args);
242         if (ret != str_size - 1) {
243                 ret = -EINVAL;
244                 goto error;
245         }
246         iter->cls_str = cls_str;
247         free(devargs.args); /* allocated by rte_devargs_parse() */
248         devargs.args = NULL;
249
250         iter->bus = devargs.bus;
251         if (iter->bus->dev_iterate == NULL) {
252                 ret = -ENOTSUP;
253                 goto error;
254         }
255
256         /* Convert bus args to new syntax for use with new API dev_iterate. */
257         if (strcmp(iter->bus->name, "vdev") == 0) {
258                 bus_param_key = "name";
259         } else if (strcmp(iter->bus->name, "pci") == 0) {
260                 bus_param_key = "addr";
261         } else {
262                 ret = -ENOTSUP;
263                 goto error;
264         }
265         str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
266         bus_str = malloc(str_size);
267         if (bus_str == NULL) {
268                 ret = -ENOMEM;
269                 goto error;
270         }
271         ret = snprintf(bus_str, str_size, "%s=%s",
272                         bus_param_key, devargs.name);
273         if (ret != str_size - 1) {
274                 ret = -EINVAL;
275                 goto error;
276         }
277         iter->bus_str = bus_str;
278
279 end:
280         iter->cls = rte_class_find_by_name("eth");
281         return 0;
282
283 error:
284         if (ret == -ENOTSUP)
285                 RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n",
286                                 iter->bus->name);
287         free(devargs.args);
288         free(bus_str);
289         free(cls_str);
290         return ret;
291 }
292
293 uint16_t
294 rte_eth_iterator_next(struct rte_dev_iterator *iter)
295 {
296         if (iter->cls == NULL) /* invalid ethdev iterator */
297                 return RTE_MAX_ETHPORTS;
298
299         do { /* loop to try all matching rte_device */
300                 /* If not pure ethdev filter and */
301                 if (iter->bus != NULL &&
302                                 /* not in middle of rte_eth_dev iteration, */
303                                 iter->class_device == NULL) {
304                         /* get next rte_device to try. */
305                         iter->device = iter->bus->dev_iterate(
306                                         iter->device, iter->bus_str, iter);
307                         if (iter->device == NULL)
308                                 break; /* no more rte_device candidate */
309                 }
310                 /* A device is matching bus part, need to check ethdev part. */
311                 iter->class_device = iter->cls->dev_iterate(
312                                 iter->class_device, iter->cls_str, iter);
313                 if (iter->class_device != NULL)
314                         return eth_dev_to_id(iter->class_device); /* match */
315         } while (iter->bus != NULL); /* need to try next rte_device */
316
317         /* No more ethdev port to iterate. */
318         rte_eth_iterator_cleanup(iter);
319         return RTE_MAX_ETHPORTS;
320 }
321
322 void
323 rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
324 {
325         if (iter->bus_str == NULL)
326                 return; /* nothing to free in pure class filter */
327         free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
328         free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
329         memset(iter, 0, sizeof(*iter));
330 }
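/*
 * Editor's note: an illustrative sketch, not part of this file, showing the
 * iterator trio above used together. The "class=eth,mac=..." devargs string
 * uses the pure-class syntax handled in rte_eth_iterator_init(); the MAC
 * value is a made-up example, and the includes at the top of this file are
 * assumed.
 */
static void
list_matching_ports(void)
{
        struct rte_dev_iterator iter;
        uint16_t port_id;

        if (rte_eth_iterator_init(&iter, "class=eth,mac=00:11:22:33:44:55") != 0)
                return;
        for (port_id = rte_eth_iterator_next(&iter);
             port_id != RTE_MAX_ETHPORTS;
             port_id = rte_eth_iterator_next(&iter))
                printf("matched port %u\n", port_id);
        /*
         * rte_eth_iterator_next() cleans up once iteration is exhausted;
         * breaking out early would require an explicit
         * rte_eth_iterator_cleanup(&iter).
         */
}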
331
332 uint16_t
333 rte_eth_find_next(uint16_t port_id)
334 {
335         while (port_id < RTE_MAX_ETHPORTS &&
336                         rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
337                 port_id++;
338
339         if (port_id >= RTE_MAX_ETHPORTS)
340                 return RTE_MAX_ETHPORTS;
341
342         return port_id;
343 }
344
345 /*
346  * Macro to iterate over all valid ports for internal usage.
347  * Note: RTE_ETH_FOREACH_DEV differs in that it filters out owned ports.
348  */
349 #define RTE_ETH_FOREACH_VALID_DEV(port_id) \
350         for (port_id = rte_eth_find_next(0); \
351              port_id < RTE_MAX_ETHPORTS; \
352              port_id = rte_eth_find_next(port_id + 1))
353
354 uint16_t
355 rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
356 {
357         port_id = rte_eth_find_next(port_id);
358         while (port_id < RTE_MAX_ETHPORTS &&
359                         rte_eth_devices[port_id].device != parent)
360                 port_id = rte_eth_find_next(port_id + 1);
361
362         return port_id;
363 }
364
365 uint16_t
366 rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
367 {
368         RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
369         return rte_eth_find_next_of(port_id,
370                         rte_eth_devices[ref_port_id].device);
371 }
372
373 static void
374 eth_dev_shared_data_prepare(void)
375 {
376         const unsigned flags = 0;
377         const struct rte_memzone *mz;
378
379         rte_spinlock_lock(&eth_dev_shared_data_lock);
380
381         if (eth_dev_shared_data == NULL) {
382                 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
383                         /* Allocate port data and ownership shared memory. */
384                         mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
385                                         sizeof(*eth_dev_shared_data),
386                                         rte_socket_id(), flags);
387                 } else
388                         mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
389                 if (mz == NULL)
390                         rte_panic("Cannot allocate ethdev shared data\n");
391
392                 eth_dev_shared_data = mz->addr;
393                 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
394                         eth_dev_shared_data->next_owner_id =
395                                         RTE_ETH_DEV_NO_OWNER + 1;
396                         rte_spinlock_init(&eth_dev_shared_data->ownership_lock);
397                         memset(eth_dev_shared_data->data, 0,
398                                sizeof(eth_dev_shared_data->data));
399                 }
400         }
401
402         rte_spinlock_unlock(&eth_dev_shared_data_lock);
403 }
404
405 static bool
406 eth_dev_is_allocated(const struct rte_eth_dev *ethdev)
407 {
408         return ethdev->data->name[0] != '\0';
409 }
410
411 static struct rte_eth_dev *
412 eth_dev_allocated(const char *name)
413 {
414         unsigned i;
415
416         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
417                 if (rte_eth_devices[i].data != NULL &&
418                     strcmp(rte_eth_devices[i].data->name, name) == 0)
419                         return &rte_eth_devices[i];
420         }
421         return NULL;
422 }
423
424 struct rte_eth_dev *
425 rte_eth_dev_allocated(const char *name)
426 {
427         struct rte_eth_dev *ethdev;
428
429         eth_dev_shared_data_prepare();
430
431         rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
432
433         ethdev = eth_dev_allocated(name);
434
435         rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
436
437         return ethdev;
438 }
439
440 static uint16_t
441 eth_dev_find_free_port(void)
442 {
443         unsigned i;
444
445         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
446                 /* Using shared name field to find a free port. */
447                 if (eth_dev_shared_data->data[i].name[0] == '\0') {
448                         RTE_ASSERT(rte_eth_devices[i].state ==
449                                    RTE_ETH_DEV_UNUSED);
450                         return i;
451                 }
452         }
453         return RTE_MAX_ETHPORTS;
454 }
455
456 static struct rte_eth_dev *
457 eth_dev_get(uint16_t port_id)
458 {
459         struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
460
461         eth_dev->data = &eth_dev_shared_data->data[port_id];
462
463         return eth_dev;
464 }
465
466 struct rte_eth_dev *
467 rte_eth_dev_allocate(const char *name)
468 {
469         uint16_t port_id;
470         struct rte_eth_dev *eth_dev = NULL;
471         size_t name_len;
472
473         name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
474         if (name_len == 0) {
475                 RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n");
476                 return NULL;
477         }
478
479         if (name_len >= RTE_ETH_NAME_MAX_LEN) {
480                 RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n");
481                 return NULL;
482         }
483
484         eth_dev_shared_data_prepare();
485
486         /* Synchronize port creation between primary and secondary processes. */
487         rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
488
489         if (eth_dev_allocated(name) != NULL) {
490                 RTE_ETHDEV_LOG(ERR,
491                         "Ethernet device with name %s already allocated\n",
492                         name);
493                 goto unlock;
494         }
495
496         port_id = eth_dev_find_free_port();
497         if (port_id == RTE_MAX_ETHPORTS) {
498                 RTE_ETHDEV_LOG(ERR,
499                         "Reached maximum number of Ethernet ports\n");
500                 goto unlock;
501         }
502
503         eth_dev = eth_dev_get(port_id);
504         strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
505         eth_dev->data->port_id = port_id;
506         eth_dev->data->mtu = RTE_ETHER_MTU;
507         pthread_mutex_init(&eth_dev->data->flow_ops_mutex, NULL);
508
509 unlock:
510         rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
511
512         return eth_dev;
513 }
514
515 /*
516  * Attach to a port already registered by the primary process, which
517  * ensures that the same device gets the same port ID in both
518  * the primary and secondary processes.
519  */
520 struct rte_eth_dev *
521 rte_eth_dev_attach_secondary(const char *name)
522 {
523         uint16_t i;
524         struct rte_eth_dev *eth_dev = NULL;
525
526         eth_dev_shared_data_prepare();
527
528         /* Synchronize port attachment with port creation and release in the primary process. */
529         rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
530
531         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
532                 if (strcmp(eth_dev_shared_data->data[i].name, name) == 0)
533                         break;
534         }
535         if (i == RTE_MAX_ETHPORTS) {
536                 RTE_ETHDEV_LOG(ERR,
537                         "Device %s is not driven by the primary process\n",
538                         name);
539         } else {
540                 eth_dev = eth_dev_get(i);
541                 RTE_ASSERT(eth_dev->data->port_id == i);
542         }
543
544         rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
545         return eth_dev;
546 }
547
548 int
549 rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
550 {
551         if (eth_dev == NULL)
552                 return -EINVAL;
553
554         eth_dev_shared_data_prepare();
555
556         if (eth_dev->state != RTE_ETH_DEV_UNUSED)
557                 rte_eth_dev_callback_process(eth_dev,
558                                 RTE_ETH_EVENT_DESTROY, NULL);
559
560         rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
561
562         eth_dev->state = RTE_ETH_DEV_UNUSED;
563         eth_dev->device = NULL;
564         eth_dev->process_private = NULL;
565         eth_dev->intr_handle = NULL;
566         eth_dev->rx_pkt_burst = NULL;
567         eth_dev->tx_pkt_burst = NULL;
568         eth_dev->tx_pkt_prepare = NULL;
569         eth_dev->rx_queue_count = NULL;
570         eth_dev->rx_descriptor_done = NULL;
571         eth_dev->rx_descriptor_status = NULL;
572         eth_dev->tx_descriptor_status = NULL;
573         eth_dev->dev_ops = NULL;
574
575         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
576                 rte_free(eth_dev->data->rx_queues);
577                 rte_free(eth_dev->data->tx_queues);
578                 rte_free(eth_dev->data->mac_addrs);
579                 rte_free(eth_dev->data->hash_mac_addrs);
580                 rte_free(eth_dev->data->dev_private);
581                 pthread_mutex_destroy(&eth_dev->data->flow_ops_mutex);
582                 memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
583         }
584
585         rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
586
587         return 0;
588 }
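/*
 * Editor's note: an illustrative sketch, not part of this file, of how a
 * PMD typically pairs the allocation helpers above during probe. The device
 * name and the private-data struct are hypothetical; the includes at the
 * top of this file are assumed.
 */
struct my_pmd_private { int dummy; };

static struct rte_eth_dev *
my_pmd_probe_ethdev(const char *name)
{
        struct rte_eth_dev *eth_dev;

        /* A secondary process attaches to the port created by the primary. */
        if (rte_eal_process_type() == RTE_PROC_SECONDARY)
                return rte_eth_dev_attach_secondary(name);

        eth_dev = rte_eth_dev_allocate(name);
        if (eth_dev == NULL)
                return NULL;
        eth_dev->data->dev_private = rte_zmalloc("my_pmd",
                        sizeof(struct my_pmd_private), RTE_CACHE_LINE_SIZE);
        if (eth_dev->data->dev_private == NULL) {
                /* In the primary, this also frees the shared data fields. */
                rte_eth_dev_release_port(eth_dev);
                return NULL;
        }
        return eth_dev;
}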
589
590 int
591 rte_eth_dev_is_valid_port(uint16_t port_id)
592 {
593         if (port_id >= RTE_MAX_ETHPORTS ||
594             (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
595                 return 0;
596         else
597                 return 1;
598 }
599
600 static int
601 eth_is_valid_owner_id(uint64_t owner_id)
602 {
603         if (owner_id == RTE_ETH_DEV_NO_OWNER ||
604             eth_dev_shared_data->next_owner_id <= owner_id)
605                 return 0;
606         return 1;
607 }
608
609 uint64_t
610 rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
611 {
612         port_id = rte_eth_find_next(port_id);
613         while (port_id < RTE_MAX_ETHPORTS &&
614                         rte_eth_devices[port_id].data->owner.id != owner_id)
615                 port_id = rte_eth_find_next(port_id + 1);
616
617         return port_id;
618 }
619
620 int
621 rte_eth_dev_owner_new(uint64_t *owner_id)
622 {
623         eth_dev_shared_data_prepare();
624
625         rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
626
627         *owner_id = eth_dev_shared_data->next_owner_id++;
628
629         rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
630         return 0;
631 }
632
633 static int
634 eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
635                        const struct rte_eth_dev_owner *new_owner)
636 {
637         struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
638         struct rte_eth_dev_owner *port_owner;
639
640         if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
641                 RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
642                         port_id);
643                 return -ENODEV;
644         }
645
646         if (!eth_is_valid_owner_id(new_owner->id) &&
647             !eth_is_valid_owner_id(old_owner_id)) {
648                 RTE_ETHDEV_LOG(ERR,
649                         "Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
650                        old_owner_id, new_owner->id);
651                 return -EINVAL;
652         }
653
654         port_owner = &rte_eth_devices[port_id].data->owner;
655         if (port_owner->id != old_owner_id) {
656                 RTE_ETHDEV_LOG(ERR,
657                         "Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
658                         port_id, port_owner->name, port_owner->id);
659                 return -EPERM;
660         }
661
662         /* cannot truncate (name buffer has the same size in both structures) */
663         strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);
664
665         port_owner->id = new_owner->id;
666
667         RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
668                 port_id, new_owner->name, new_owner->id);
669
670         return 0;
671 }
672
673 int
674 rte_eth_dev_owner_set(const uint16_t port_id,
675                       const struct rte_eth_dev_owner *owner)
676 {
677         int ret;
678
679         eth_dev_shared_data_prepare();
680
681         rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
682
683         ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);
684
685         rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
686         return ret;
687 }
688
689 int
690 rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
691 {
692         const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
693                         {.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
694         int ret;
695
696         eth_dev_shared_data_prepare();
697
698         rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
699
700         ret = eth_dev_owner_set(port_id, owner_id, &new_owner);
701
702         rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
703         return ret;
704 }
705
706 int
707 rte_eth_dev_owner_delete(const uint64_t owner_id)
708 {
709         uint16_t port_id;
710         int ret = 0;
711
712         eth_dev_shared_data_prepare();
713
714         rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
715
716         if (eth_is_valid_owner_id(owner_id)) {
717                 for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
718                         if (rte_eth_devices[port_id].data->owner.id == owner_id)
719                                 memset(&rte_eth_devices[port_id].data->owner, 0,
720                                        sizeof(struct rte_eth_dev_owner));
721                 RTE_ETHDEV_LOG(NOTICE,
722                         "All port owners owned by %016"PRIx64" identifier have removed\n",
723                         owner_id);
724         } else {
725                 RTE_ETHDEV_LOG(ERR,
726                                "Invalid owner id=%016"PRIx64"\n",
727                                owner_id);
728                 ret = -EINVAL;
729         }
730
731         rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
732
733         return ret;
734 }
735
736 int
737 rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
738 {
739         int ret = 0;
740         struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
741
742         eth_dev_shared_data_prepare();
743
744         rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
745
746         if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
747                 RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
748                         port_id);
749                 ret = -ENODEV;
750         } else {
751                 rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
752         }
753
754         rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
755         return ret;
756 }
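/*
 * Editor's note: an illustrative sketch, not part of this file, of the
 * ownership API above: claim a port, walk the ports we own, then release
 * the claim. The owner name "my_app" is a made-up example.
 */
static int
claim_port(uint16_t port_id)
{
        struct rte_eth_dev_owner owner = { .name = "my_app" };
        uint64_t p;
        int ret;

        ret = rte_eth_dev_owner_new(&owner.id);
        if (ret != 0)
                return ret;
        ret = rte_eth_dev_owner_set(port_id, &owner);
        if (ret != 0)
                return ret;
        /* Iterate only the ports owned by our identifier. */
        for (p = rte_eth_find_next_owned_by(0, owner.id);
             p < RTE_MAX_ETHPORTS;
             p = rte_eth_find_next_owned_by(p + 1, owner.id))
                printf("owned port %u\n", (unsigned int)p);
        return rte_eth_dev_owner_unset(port_id, owner.id);
}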
757
758 int
759 rte_eth_dev_socket_id(uint16_t port_id)
760 {
761         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
762         return rte_eth_devices[port_id].data->numa_node;
763 }
764
765 void *
766 rte_eth_dev_get_sec_ctx(uint16_t port_id)
767 {
768         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
769         return rte_eth_devices[port_id].security_ctx;
770 }
771
772 uint16_t
773 rte_eth_dev_count_avail(void)
774 {
775         uint16_t p;
776         uint16_t count;
777
778         count = 0;
779
780         RTE_ETH_FOREACH_DEV(p)
781                 count++;
782
783         return count;
784 }
785
786 uint16_t
787 rte_eth_dev_count_total(void)
788 {
789         uint16_t port, count = 0;
790
791         RTE_ETH_FOREACH_VALID_DEV(port)
792                 count++;
793
794         return count;
795 }
796
797 int
798 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
799 {
800         char *tmp;
801
802         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
803
804         if (name == NULL) {
805                 RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
806                 return -EINVAL;
807         }
808
809         /* Don't check 'rte_eth_devices[port_id].data' here,
810          * because it might be overwritten by a vdev PMD. */
811         tmp = eth_dev_shared_data->data[port_id].name;
812         strcpy(name, tmp);
813         return 0;
814 }
815
816 int
817 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
818 {
819         uint32_t pid;
820
821         if (name == NULL) {
822                 RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
823                 return -EINVAL;
824         }
825
826         RTE_ETH_FOREACH_VALID_DEV(pid)
827                 if (!strcmp(name, eth_dev_shared_data->data[pid].name)) {
828                         *port_id = pid;
829                         return 0;
830                 }
831
832         return -ENODEV;
833 }
834
835 static int
836 eth_err(uint16_t port_id, int ret)
837 {
838         if (ret == 0)
839                 return 0;
840         if (rte_eth_dev_is_removed(port_id))
841                 return -EIO;
842         return ret;
843 }
844
845 static int
846 eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
847 {
848         uint16_t old_nb_queues = dev->data->nb_rx_queues;
849         void **rxq;
850         unsigned i;
851
852         if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
853                 dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
854                                 sizeof(dev->data->rx_queues[0]) * nb_queues,
855                                 RTE_CACHE_LINE_SIZE);
856                 if (dev->data->rx_queues == NULL) {
857                         dev->data->nb_rx_queues = 0;
858                         return -(ENOMEM);
859                 }
860         } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
861                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
862
863                 rxq = dev->data->rx_queues;
864
865                 for (i = nb_queues; i < old_nb_queues; i++)
866                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
867                 rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
868                                 RTE_CACHE_LINE_SIZE);
869                 if (rxq == NULL)
870                         return -(ENOMEM);
871                 if (nb_queues > old_nb_queues) {
872                         uint16_t new_qs = nb_queues - old_nb_queues;
873
874                         memset(rxq + old_nb_queues, 0,
875                                 sizeof(rxq[0]) * new_qs);
876                 }
877
878                 dev->data->rx_queues = rxq;
879
880         } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
881                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
882
883                 rxq = dev->data->rx_queues;
884
885                 for (i = nb_queues; i < old_nb_queues; i++)
886                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
887
888                 rte_free(dev->data->rx_queues);
889                 dev->data->rx_queues = NULL;
890         }
891         dev->data->nb_rx_queues = nb_queues;
892         return 0;
893 }
894
895 static int
896 eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
897 {
898         uint16_t port_id;
899
900         if (rx_queue_id >= dev->data->nb_rx_queues) {
901                 port_id = dev->data->port_id;
902                 RTE_ETHDEV_LOG(ERR,
903                                "Invalid Rx queue_id=%u of device with port_id=%u\n",
904                                rx_queue_id, port_id);
905                 return -EINVAL;
906         }
907
908         if (dev->data->rx_queues[rx_queue_id] == NULL) {
909                 port_id = dev->data->port_id;
910                 RTE_ETHDEV_LOG(ERR,
911                                "Queue %u of device with port_id=%u has not been setup\n",
912                                rx_queue_id, port_id);
913                 return -EINVAL;
914         }
915
916         return 0;
917 }
918
919 static int
920 eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id)
921 {
922         uint16_t port_id;
923
924         if (tx_queue_id >= dev->data->nb_tx_queues) {
925                 port_id = dev->data->port_id;
926                 RTE_ETHDEV_LOG(ERR,
927                                "Invalid Tx queue_id=%u of device with port_id=%u\n",
928                                tx_queue_id, port_id);
929                 return -EINVAL;
930         }
931
932         if (dev->data->tx_queues[tx_queue_id] == NULL) {
933                 port_id = dev->data->port_id;
934                 RTE_ETHDEV_LOG(ERR,
935                                "Queue %u of device with port_id=%u has not been setup\n",
936                                tx_queue_id, port_id);
937                 return -EINVAL;
938         }
939
940         return 0;
941 }
942
943 int
944 rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
945 {
946         struct rte_eth_dev *dev;
947         int ret;
948
949         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
950
951         dev = &rte_eth_devices[port_id];
952         if (!dev->data->dev_started) {
953                 RTE_ETHDEV_LOG(ERR,
954                         "Port %u must be started before start any queue\n",
955                         port_id);
956                 return -EINVAL;
957         }
958
959         ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
960         if (ret != 0)
961                 return ret;
962
963         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
964
965         if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
966                 RTE_ETHDEV_LOG(INFO,
967                         "Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
968                         rx_queue_id, port_id);
969                 return -EINVAL;
970         }
971
972         if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
973                 RTE_ETHDEV_LOG(INFO,
974                         "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
975                         rx_queue_id, port_id);
976                 return 0;
977         }
978
979         return eth_err(port_id, dev->dev_ops->rx_queue_start(dev,
980                                                              rx_queue_id));
981
982 }
983
984 int
985 rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
986 {
987         struct rte_eth_dev *dev;
988         int ret;
989
990         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
991
992         dev = &rte_eth_devices[port_id];
993
994         ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
995         if (ret != 0)
996                 return ret;
997
998         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
999
1000         if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
1001                 RTE_ETHDEV_LOG(INFO,
1002                         "Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
1003                         rx_queue_id, port_id);
1004                 return -EINVAL;
1005         }
1006
1007         if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
1008                 RTE_ETHDEV_LOG(INFO,
1009                         "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
1010                         rx_queue_id, port_id);
1011                 return 0;
1012         }
1013
1014         return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
1015
1016 }
1017
1018 int
1019 rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
1020 {
1021         struct rte_eth_dev *dev;
1022         int ret;
1023
1024         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1025
1026         dev = &rte_eth_devices[port_id];
1027         if (!dev->data->dev_started) {
1028                 RTE_ETHDEV_LOG(ERR,
1029                         "Port %u must be started before start any queue\n",
1030                         port_id);
1031                 return -EINVAL;
1032         }
1033
1034         ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
1035         if (ret != 0)
1036                 return ret;
1037
1038         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
1039
1040         if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
1041                 RTE_ETHDEV_LOG(INFO,
1042                         "Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
1043                         tx_queue_id, port_id);
1044                 return -EINVAL;
1045         }
1046
1047         if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
1048                 RTE_ETHDEV_LOG(INFO,
1049                         "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
1050                         tx_queue_id, port_id);
1051                 return 0;
1052         }
1053
1054         return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
1055 }
1056
1057 int
1058 rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
1059 {
1060         struct rte_eth_dev *dev;
1061         int ret;
1062
1063         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1064
1065         dev = &rte_eth_devices[port_id];
1066
1067         ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
1068         if (ret != 0)
1069                 return ret;
1070
1071         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
1072
1073         if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
1074                 RTE_ETHDEV_LOG(INFO,
1075                         "Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
1076                         tx_queue_id, port_id);
1077                 return -EINVAL;
1078         }
1079
1080         if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
1081                 RTE_ETHDEV_LOG(INFO,
1082                         "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
1083                         tx_queue_id, port_id);
1084                 return 0;
1085         }
1086
1087         return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
1088
1089 }
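/*
 * Editor's note: an illustrative sketch, not part of this file, of the
 * per-queue start/stop API above, using a deferred-start Rx queue. The
 * mempool is assumed to be created elsewhere and the descriptor count of
 * 512 is arbitrary.
 */
static int
setup_deferred_rx_queue(uint16_t port_id, uint16_t queue_id,
                struct rte_mempool *mb_pool)
{
        struct rte_eth_dev_info dev_info;
        struct rte_eth_rxconf rxconf;
        int ret;

        ret = rte_eth_dev_info_get(port_id, &dev_info);
        if (ret != 0)
                return ret;
        rxconf = dev_info.default_rxconf;
        rxconf.rx_deferred_start = 1; /* don't start with the port */
        ret = rte_eth_rx_queue_setup(port_id, queue_id, 512,
                        rte_eth_dev_socket_id(port_id), &rxconf, mb_pool);
        if (ret != 0)
                return ret;
        ret = rte_eth_dev_start(port_id);
        if (ret != 0)
                return ret;
        /* The port must be started before starting any queue. */
        return rte_eth_dev_rx_queue_start(port_id, queue_id);
}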
1090
1091 static int
1092 eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
1093 {
1094         uint16_t old_nb_queues = dev->data->nb_tx_queues;
1095         void **txq;
1096         unsigned i;
1097
1098         if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
1099                 dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
1100                                                    sizeof(dev->data->tx_queues[0]) * nb_queues,
1101                                                    RTE_CACHE_LINE_SIZE);
1102                 if (dev->data->tx_queues == NULL) {
1103                         dev->data->nb_tx_queues = 0;
1104                         return -(ENOMEM);
1105                 }
1106         } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
1107                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
1108
1109                 txq = dev->data->tx_queues;
1110
1111                 for (i = nb_queues; i < old_nb_queues; i++)
1112                         (*dev->dev_ops->tx_queue_release)(txq[i]);
1113                 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
1114                                   RTE_CACHE_LINE_SIZE);
1115                 if (txq == NULL)
1116                         return -ENOMEM;
1117                 if (nb_queues > old_nb_queues) {
1118                         uint16_t new_qs = nb_queues - old_nb_queues;
1119
1120                         memset(txq + old_nb_queues, 0,
1121                                sizeof(txq[0]) * new_qs);
1122                 }
1123
1124                 dev->data->tx_queues = txq;
1125
1126         } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
1127                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
1128
1129                 txq = dev->data->tx_queues;
1130
1131                 for (i = nb_queues; i < old_nb_queues; i++)
1132                         (*dev->dev_ops->tx_queue_release)(txq[i]);
1133
1134                 rte_free(dev->data->tx_queues);
1135                 dev->data->tx_queues = NULL;
1136         }
1137         dev->data->nb_tx_queues = nb_queues;
1138         return 0;
1139 }
1140
1141 uint32_t
1142 rte_eth_speed_bitflag(uint32_t speed, int duplex)
1143 {
1144         switch (speed) {
1145         case ETH_SPEED_NUM_10M:
1146                 return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
1147         case ETH_SPEED_NUM_100M:
1148                 return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
1149         case ETH_SPEED_NUM_1G:
1150                 return ETH_LINK_SPEED_1G;
1151         case ETH_SPEED_NUM_2_5G:
1152                 return ETH_LINK_SPEED_2_5G;
1153         case ETH_SPEED_NUM_5G:
1154                 return ETH_LINK_SPEED_5G;
1155         case ETH_SPEED_NUM_10G:
1156                 return ETH_LINK_SPEED_10G;
1157         case ETH_SPEED_NUM_20G:
1158                 return ETH_LINK_SPEED_20G;
1159         case ETH_SPEED_NUM_25G:
1160                 return ETH_LINK_SPEED_25G;
1161         case ETH_SPEED_NUM_40G:
1162                 return ETH_LINK_SPEED_40G;
1163         case ETH_SPEED_NUM_50G:
1164                 return ETH_LINK_SPEED_50G;
1165         case ETH_SPEED_NUM_56G:
1166                 return ETH_LINK_SPEED_56G;
1167         case ETH_SPEED_NUM_100G:
1168                 return ETH_LINK_SPEED_100G;
1169         case ETH_SPEED_NUM_200G:
1170                 return ETH_LINK_SPEED_200G;
1171         default:
1172                 return 0;
1173         }
1174 }
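/*
 * Editor's note: an illustrative sketch, not part of this file, showing
 * rte_eth_speed_bitflag() used to build the link_speeds field of
 * struct rte_eth_conf for a fixed-speed configuration.
 */
static void
set_fixed_speed(struct rte_eth_conf *conf, uint32_t speed_num)
{
        /* ETH_LINK_SPEED_FIXED disables auto-negotiation. */
        conf->link_speeds = ETH_LINK_SPEED_FIXED |
                        rte_eth_speed_bitflag(speed_num, ETH_LINK_FULL_DUPLEX);
}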
1175
1176 const char *
1177 rte_eth_dev_rx_offload_name(uint64_t offload)
1178 {
1179         const char *name = "UNKNOWN";
1180         unsigned int i;
1181
1182         for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i) {
1183                 if (offload == eth_dev_rx_offload_names[i].offload) {
1184                         name = eth_dev_rx_offload_names[i].name;
1185                         break;
1186                 }
1187         }
1188
1189         return name;
1190 }
1191
1192 const char *
1193 rte_eth_dev_tx_offload_name(uint64_t offload)
1194 {
1195         const char *name = "UNKNOWN";
1196         unsigned int i;
1197
1198         for (i = 0; i < RTE_DIM(eth_dev_tx_offload_names); ++i) {
1199                 if (offload == eth_dev_tx_offload_names[i].offload) {
1200                         name = eth_dev_tx_offload_names[i].name;
1201                         break;
1202                 }
1203         }
1204
1205         return name;
1206 }
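/*
 * Editor's note: an illustrative sketch, not part of this file, that uses
 * the name lookups above to print each single-bit Rx offload capability
 * reported by a device.
 */
static void
print_rx_offload_capa(uint16_t port_id)
{
        struct rte_eth_dev_info dev_info;
        uint64_t capa;

        if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
                return;
        /* Clear the lowest set bit on each iteration. */
        for (capa = dev_info.rx_offload_capa; capa != 0; capa &= (capa - 1)) {
                uint64_t offload = capa & ~(capa - 1); /* lowest set bit */

                printf("  %s\n", rte_eth_dev_rx_offload_name(offload));
        }
}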
1207
1208 static inline int
1209 eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
1210                    uint32_t max_rx_pkt_len, uint32_t dev_info_size)
1211 {
1212         int ret = 0;
1213
1214         if (dev_info_size == 0) {
1215                 if (config_size != max_rx_pkt_len) {
1216                         RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
1217                                        " %u != %u is not allowed\n",
1218                                        port_id, config_size, max_rx_pkt_len);
1219                         ret = -EINVAL;
1220                 }
1221         } else if (config_size > dev_info_size) {
1222                 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
1223                                "> max allowed value %u\n", port_id, config_size,
1224                                dev_info_size);
1225                 ret = -EINVAL;
1226         } else if (config_size < RTE_ETHER_MIN_LEN) {
1227                 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
1228                                "< min allowed value %u\n", port_id, config_size,
1229                                (unsigned int)RTE_ETHER_MIN_LEN);
1230                 ret = -EINVAL;
1231         }
1232         return ret;
1233 }
1234
1235 /*
1236  * Validate offloads that are requested through rte_eth_dev_configure against
1237  * the offloads successfully set by the ethernet device.
1238  *
1239  * @param port_id
1240  *   The port identifier of the Ethernet device.
1241  * @param req_offloads
1242  *   The offloads that have been requested through `rte_eth_dev_configure`.
1243  * @param set_offloads
1244  *   The offloads successfully set by the ethernet device.
1245  * @param offload_type
1246  *   The offload type i.e. Rx/Tx string.
1247  * @param offload_name
1248  *   The function that prints the offload name.
1249  * @return
1250  *   - (0) if validation successful.
1251  *   - (-EINVAL) if requested offload has been silently disabled.
1252  *
1253  */
1254 static int
1255 eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads,
1256                   uint64_t set_offloads, const char *offload_type,
1257                   const char *(*offload_name)(uint64_t))
1258 {
1259         uint64_t offloads_diff = req_offloads ^ set_offloads;
1260         uint64_t offload;
1261         int ret = 0;
1262
1263         while (offloads_diff != 0) {
1264                 /* Check if any offload is requested but not enabled. */
1265                 offload = 1ULL << __builtin_ctzll(offloads_diff);
1266                 if (offload & req_offloads) {
1267                         RTE_ETHDEV_LOG(ERR,
1268                                 "Port %u failed to enable %s offload %s\n",
1269                                 port_id, offload_type, offload_name(offload));
1270                         ret = -EINVAL;
1271                 }
1272
1273                 /* Check if offload couldn't be disabled. */
1274                 if (offload & set_offloads) {
1275                         RTE_ETHDEV_LOG(DEBUG,
1276                                 "Port %u %s offload %s is not requested but enabled\n",
1277                                 port_id, offload_type, offload_name(offload));
1278                 }
1279
1280                 offloads_diff &= ~offload;
1281         }
1282
1283         return ret;
1284 }
1285
1286 int
1287 rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
1288                       const struct rte_eth_conf *dev_conf)
1289 {
1290         struct rte_eth_dev *dev;
1291         struct rte_eth_dev_info dev_info;
1292         struct rte_eth_conf orig_conf;
1293         int diag;
1294         int ret;
1295
1296         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1297
1298         dev = &rte_eth_devices[port_id];
1299
1300         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
1301
1302         if (dev->data->dev_started) {
1303                 RTE_ETHDEV_LOG(ERR,
1304                         "Port %u must be stopped to allow configuration\n",
1305                         port_id);
1306                 return -EBUSY;
1307         }
1308
1309         /* Store the original config, as rollback is required on failure */
1310         memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));
1311
1312         /*
1313          * Copy the dev_conf parameter into the dev structure.
1314          * rte_eth_dev_info_get() requires dev_conf, so copy it before querying dev_info.
1315          */
1316         if (dev_conf != &dev->data->dev_conf)
1317                 memcpy(&dev->data->dev_conf, dev_conf,
1318                        sizeof(dev->data->dev_conf));
1319
1320         ret = rte_eth_dev_info_get(port_id, &dev_info);
1321         if (ret != 0)
1322                 goto rollback;
1323
1324         /* If number of queues specified by application for both Rx and Tx is
1325          * zero, use driver preferred values. This cannot be done individually
1326          * as it is valid for either Tx or Rx (but not both) to be zero.
1327          * If the driver does not provide any preferred values, fall back
1328          * on EAL defaults.
1329          */
1330         if (nb_rx_q == 0 && nb_tx_q == 0) {
1331                 nb_rx_q = dev_info.default_rxportconf.nb_queues;
1332                 if (nb_rx_q == 0)
1333                         nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
1334                 nb_tx_q = dev_info.default_txportconf.nb_queues;
1335                 if (nb_tx_q == 0)
1336                         nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
1337         }
1338
1339         if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
1340                 RTE_ETHDEV_LOG(ERR,
1341                         "Number of RX queues requested (%u) is greater than max supported(%d)\n",
1342                         nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
1343                 ret = -EINVAL;
1344                 goto rollback;
1345         }
1346
1347         if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
1348                 RTE_ETHDEV_LOG(ERR,
1349                         "Number of TX queues requested (%u) is greater than max supported(%d)\n",
1350                         nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
1351                 ret = -EINVAL;
1352                 goto rollback;
1353         }
1354
1355         /*
1356          * Check that the numbers of RX and TX queues are not greater
1357          * than the maximum number of RX and TX queues supported by the
1358          * configured device.
1359          */
1360         if (nb_rx_q > dev_info.max_rx_queues) {
1361                 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
1362                         port_id, nb_rx_q, dev_info.max_rx_queues);
1363                 ret = -EINVAL;
1364                 goto rollback;
1365         }
1366
1367         if (nb_tx_q > dev_info.max_tx_queues) {
1368                 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
1369                         port_id, nb_tx_q, dev_info.max_tx_queues);
1370                 ret = -EINVAL;
1371                 goto rollback;
1372         }
1373
1374         /* Check that the device supports requested interrupts */
1375         if ((dev_conf->intr_conf.lsc == 1) &&
1376                         (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
1377                 RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
1378                         dev->device->driver->name);
1379                 ret = -EINVAL;
1380                 goto rollback;
1381         }
1382         if ((dev_conf->intr_conf.rmv == 1) &&
1383                         (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
1384                 RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
1385                         dev->device->driver->name);
1386                 ret = -EINVAL;
1387                 goto rollback;
1388         }
1389
1390         /*
1391          * If jumbo frames are enabled, check that the maximum RX packet
1392          * length is supported by the configured device.
1393          */
1394         if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
1395                 if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) {
1396                         RTE_ETHDEV_LOG(ERR,
1397                                 "Ethdev port_id=%u max_rx_pkt_len %u > max valid value %u\n",
1398                                 port_id, dev_conf->rxmode.max_rx_pkt_len,
1399                                 dev_info.max_rx_pktlen);
1400                         ret = -EINVAL;
1401                         goto rollback;
1402                 } else if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN) {
1403                         RTE_ETHDEV_LOG(ERR,
1404                                 "Ethdev port_id=%u max_rx_pkt_len %u < min valid value %u\n",
1405                                 port_id, dev_conf->rxmode.max_rx_pkt_len,
1406                                 (unsigned int)RTE_ETHER_MIN_LEN);
1407                         ret = -EINVAL;
1408                         goto rollback;
1409                 }
1410         } else {
1411                 if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN ||
1412                         dev_conf->rxmode.max_rx_pkt_len > RTE_ETHER_MAX_LEN)
1413                         /* Use default value */
1414                         dev->data->dev_conf.rxmode.max_rx_pkt_len =
1415                                                         RTE_ETHER_MAX_LEN;
1416         }
1417
1418         /*
1419          * If LRO is enabled, check that the maximum aggregated packet
1420          * size is supported by the configured device.
1421          */
1422         if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
1423                 if (dev_conf->rxmode.max_lro_pkt_size == 0)
1424                         dev->data->dev_conf.rxmode.max_lro_pkt_size =
1425                                 dev->data->dev_conf.rxmode.max_rx_pkt_len;
1426                 ret = eth_dev_check_lro_pkt_size(port_id,
1427                                 dev->data->dev_conf.rxmode.max_lro_pkt_size,
1428                                 dev->data->dev_conf.rxmode.max_rx_pkt_len,
1429                                 dev_info.max_lro_pkt_size);
1430                 if (ret != 0)
1431                         goto rollback;
1432         }
1433
1434         /* Any requested offloading must be within its device capabilities */
1435         if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
1436              dev_conf->rxmode.offloads) {
1437                 RTE_ETHDEV_LOG(ERR,
1438                         "Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
1439                         "capabilities 0x%"PRIx64" in %s()\n",
1440                         port_id, dev_conf->rxmode.offloads,
1441                         dev_info.rx_offload_capa,
1442                         __func__);
1443                 ret = -EINVAL;
1444                 goto rollback;
1445         }
1446         if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
1447              dev_conf->txmode.offloads) {
1448                 RTE_ETHDEV_LOG(ERR,
1449                         "Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
1450                         "capabilities 0x%"PRIx64" in %s()\n",
1451                         port_id, dev_conf->txmode.offloads,
1452                         dev_info.tx_offload_capa,
1453                         __func__);
1454                 ret = -EINVAL;
1455                 goto rollback;
1456         }
1457
1458         dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
1459                 rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);
1460
1461         /* Check that the device supports the requested RSS hash functions. */
1462         if ((dev_info.flow_type_rss_offloads |
1463              dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
1464             dev_info.flow_type_rss_offloads) {
1465                 RTE_ETHDEV_LOG(ERR,
1466                         "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
1467                         port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
1468                         dev_info.flow_type_rss_offloads);
1469                 ret = -EINVAL;
1470                 goto rollback;
1471         }
1472
1473         /* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
1474         if (((dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) == 0) &&
1475             (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
1476                 RTE_ETHDEV_LOG(ERR,
1477                         "Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
1478                         port_id,
1479                         rte_eth_dev_rx_offload_name(DEV_RX_OFFLOAD_RSS_HASH));
1480                 ret = -EINVAL;
1481                 goto rollback;
1482         }
1483
1484         /*
1485          * Setup new number of RX/TX queues and reconfigure device.
1486          */
1487         diag = eth_dev_rx_queue_config(dev, nb_rx_q);
1488         if (diag != 0) {
1489                 RTE_ETHDEV_LOG(ERR,
1490                         "Port%u eth_dev_rx_queue_config = %d\n",
1491                         port_id, diag);
1492                 ret = diag;
1493                 goto rollback;
1494         }
1495
1496         diag = eth_dev_tx_queue_config(dev, nb_tx_q);
1497         if (diag != 0) {
1498                 RTE_ETHDEV_LOG(ERR,
1499                         "Port%u eth_dev_tx_queue_config = %d\n",
1500                         port_id, diag);
1501                 eth_dev_rx_queue_config(dev, 0);
1502                 ret = diag;
1503                 goto rollback;
1504         }
1505
1506         diag = (*dev->dev_ops->dev_configure)(dev);
1507         if (diag != 0) {
1508                 RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
1509                         port_id, diag);
1510                 ret = eth_err(port_id, diag);
1511                 goto reset_queues;
1512         }
1513
1514         /* Initialize Rx profiling if enabled at compilation time. */
1515         diag = __rte_eth_dev_profile_init(port_id, dev);
1516         if (diag != 0) {
1517                 RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
1518                         port_id, diag);
1519                 ret = eth_err(port_id, diag);
1520                 goto reset_queues;
1521         }
1522
1523         /* Validate Rx offloads. */
1524         diag = eth_dev_validate_offloads(port_id,
1525                         dev_conf->rxmode.offloads,
1526                         dev->data->dev_conf.rxmode.offloads, "Rx",
1527                         rte_eth_dev_rx_offload_name);
1528         if (diag != 0) {
1529                 ret = diag;
1530                 goto reset_queues;
1531         }
1532
1533         /* Validate Tx offloads. */
1534         diag = eth_dev_validate_offloads(port_id,
1535                         dev_conf->txmode.offloads,
1536                         dev->data->dev_conf.txmode.offloads, "Tx",
1537                         rte_eth_dev_tx_offload_name);
1538         if (diag != 0) {
1539                 ret = diag;
1540                 goto reset_queues;
1541         }
1542
1543         rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0);
1544         return 0;
1545 reset_queues:
1546         eth_dev_rx_queue_config(dev, 0);
1547         eth_dev_tx_queue_config(dev, 0);
1548 rollback:
1549         memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
1550
1551         rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret);
1552         return ret;
1553 }
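/*
 * Editor's note: illustrative sketch only, not part of this file. A minimal
 * rte_eth_dev_configure() call with one Rx and one Tx queue; the helper name
 * example_port_configure() is hypothetical.
 */
#if 0
static int
example_port_configure(uint16_t port_id)
{
        /* Zero-initialized config: default mq_mode, no offloads requested. */
        struct rte_eth_conf port_conf = {0};
        int ret;

        /* One Rx and one Tx queue; offloads and RSS settings are validated
         * against dev_info capabilities by rte_eth_dev_configure() above,
         * and the previous configuration is rolled back on failure.
         */
        ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
        if (ret != 0)
                return ret;     /* e.g. -EINVAL for an unsupported offload */
        return 0;
}
#endif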
1554
1555 void
1556 rte_eth_dev_internal_reset(struct rte_eth_dev *dev)
1557 {
1558         if (dev->data->dev_started) {
1559                 RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
1560                         dev->data->port_id);
1561                 return;
1562         }
1563
1564         eth_dev_rx_queue_config(dev, 0);
1565         eth_dev_tx_queue_config(dev, 0);
1566
1567         memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
1568 }
1569
1570 static void
1571 eth_dev_mac_restore(struct rte_eth_dev *dev,
1572                         struct rte_eth_dev_info *dev_info)
1573 {
1574         struct rte_ether_addr *addr;
1575         uint16_t i;
1576         uint32_t pool = 0;
1577         uint64_t pool_mask;
1578
1579         /* replay MAC address configuration including default MAC */
1580         addr = &dev->data->mac_addrs[0];
1581         if (*dev->dev_ops->mac_addr_set != NULL)
1582                 (*dev->dev_ops->mac_addr_set)(dev, addr);
1583         else if (*dev->dev_ops->mac_addr_add != NULL)
1584                 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);
1585
1586         if (*dev->dev_ops->mac_addr_add != NULL) {
1587                 for (i = 1; i < dev_info->max_mac_addrs; i++) {
1588                         addr = &dev->data->mac_addrs[i];
1589
1590                         /* skip zero address */
1591                         if (rte_is_zero_ether_addr(addr))
1592                                 continue;
1593
1594                         pool = 0;
1595                         pool_mask = dev->data->mac_pool_sel[i];
1596
1597                         do {
1598                                 if (pool_mask & 1ULL)
1599                                         (*dev->dev_ops->mac_addr_add)(dev,
1600                                                 addr, i, pool);
1601                                 pool_mask >>= 1;
1602                                 pool++;
1603                         } while (pool_mask);
1604                 }
1605         }
1606 }
1607
1608 static int
1609 eth_dev_config_restore(struct rte_eth_dev *dev,
1610                 struct rte_eth_dev_info *dev_info, uint16_t port_id)
1611 {
1612         int ret;
1613
1614         if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
1615                 eth_dev_mac_restore(dev, dev_info);
1616
1617         /* replay promiscuous configuration */
1618         /*
1619          * use callbacks directly since we don't need port_id check and
1620          * use the driver callbacks directly since the port_id check is
1621          * unneeded and the public API's same-value short-circuit must be bypassed
1622         if (rte_eth_promiscuous_get(port_id) == 1 &&
1623             *dev->dev_ops->promiscuous_enable != NULL) {
1624                 ret = eth_err(port_id,
1625                               (*dev->dev_ops->promiscuous_enable)(dev));
1626                 if (ret != 0 && ret != -ENOTSUP) {
1627                         RTE_ETHDEV_LOG(ERR,
1628                                 "Failed to enable promiscuous mode for device (port %u): %s\n",
1629                                 port_id, rte_strerror(-ret));
1630                         return ret;
1631                 }
1632         } else if (rte_eth_promiscuous_get(port_id) == 0 &&
1633                    *dev->dev_ops->promiscuous_disable != NULL) {
1634                 ret = eth_err(port_id,
1635                               (*dev->dev_ops->promiscuous_disable)(dev));
1636                 if (ret != 0 && ret != -ENOTSUP) {
1637                         RTE_ETHDEV_LOG(ERR,
1638                                 "Failed to disable promiscuous mode for device (port %u): %s\n",
1639                                 port_id, rte_strerror(-ret));
1640                         return ret;
1641                 }
1642         }
1643
1644         /* replay all multicast configuration */
1645         /*
1646          * use the driver callbacks directly since the port_id check is
1647          * unneeded and the public API's same-value short-circuit must be bypassed
1648          */
1649         if (rte_eth_allmulticast_get(port_id) == 1 &&
1650             *dev->dev_ops->allmulticast_enable != NULL) {
1651                 ret = eth_err(port_id,
1652                               (*dev->dev_ops->allmulticast_enable)(dev));
1653                 if (ret != 0 && ret != -ENOTSUP) {
1654                         RTE_ETHDEV_LOG(ERR,
1655                                 "Failed to enable allmulticast mode for device (port %u): %s\n",
1656                                 port_id, rte_strerror(-ret));
1657                         return ret;
1658                 }
1659         } else if (rte_eth_allmulticast_get(port_id) == 0 &&
1660                    *dev->dev_ops->allmulticast_disable != NULL) {
1661                 ret = eth_err(port_id,
1662                               (*dev->dev_ops->allmulticast_disable)(dev));
1663                 if (ret != 0 && ret != -ENOTSUP) {
1664                         RTE_ETHDEV_LOG(ERR,
1665                                 "Failed to disable allmulticast mode for device (port %u): %s\n",
1666                                 port_id, rte_strerror(-ret));
1667                         return ret;
1668                 }
1669         }
1670
1671         return 0;
1672 }
1673
1674 int
1675 rte_eth_dev_start(uint16_t port_id)
1676 {
1677         struct rte_eth_dev *dev;
1678         struct rte_eth_dev_info dev_info;
1679         int diag;
1680         int ret, ret_stop;
1681
1682         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1683
1684         dev = &rte_eth_devices[port_id];
1685
1686         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1687
1688         if (dev->data->dev_started != 0) {
1689                 RTE_ETHDEV_LOG(INFO,
1690                         "Device with port_id=%"PRIu16" already started\n",
1691                         port_id);
1692                 return 0;
1693         }
1694
1695         ret = rte_eth_dev_info_get(port_id, &dev_info);
1696         if (ret != 0)
1697                 return ret;
1698
1699         /* Restore the MAC now if the device does not support changing it live */
1700         if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
1701                 eth_dev_mac_restore(dev, &dev_info);
1702
1703         diag = (*dev->dev_ops->dev_start)(dev);
1704         if (diag == 0)
1705                 dev->data->dev_started = 1;
1706         else
1707                 return eth_err(port_id, diag);
1708
1709         ret = eth_dev_config_restore(dev, &dev_info, port_id);
1710         if (ret != 0) {
1711                 RTE_ETHDEV_LOG(ERR,
1712                         "Error during restoring configuration for device (port %u): %s\n",
1713                         port_id, rte_strerror(-ret));
1714                 ret_stop = rte_eth_dev_stop(port_id);
1715                 if (ret_stop != 0) {
1716                         RTE_ETHDEV_LOG(ERR,
1717                                 "Failed to stop device (port %u): %s\n",
1718                                 port_id, rte_strerror(-ret_stop));
1719                 }
1720
1721                 return ret;
1722         }
1723
1724         if (dev->data->dev_conf.intr_conf.lsc == 0) {
1725                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
1726                 (*dev->dev_ops->link_update)(dev, 0);
1727         }
1728
1729         rte_ethdev_trace_start(port_id);
1730         return 0;
1731 }
1732
1733 int
1734 rte_eth_dev_stop(uint16_t port_id)
1735 {
1736         struct rte_eth_dev *dev;
1737         int ret;
1738
1739         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1740         dev = &rte_eth_devices[port_id];
1741
1742         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_stop, -ENOTSUP);
1743
1744         if (dev->data->dev_started == 0) {
1745                 RTE_ETHDEV_LOG(INFO,
1746                         "Device with port_id=%"PRIu16" already stopped\n",
1747                         port_id);
1748                 return 0;
1749         }
1750
1751         dev->data->dev_started = 0;
1752         ret = (*dev->dev_ops->dev_stop)(dev);
1753         rte_ethdev_trace_stop(port_id, ret);
1754
1755         return ret;
1756 }
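/*
 * Editor's note: illustrative sketch only, not part of this file. The usual
 * stop/start cycle around the two functions above; note that in this version
 * rte_eth_dev_stop() returns int and its result should be checked. The helper
 * name example_port_restart() is hypothetical.
 */
#if 0
static int
example_port_restart(uint16_t port_id)
{
        int ret;

        /* Idempotent: returns 0 if the port is already stopped. */
        ret = rte_eth_dev_stop(port_id);
        if (ret != 0)
                return ret;
        /* Replays MAC/promiscuous/allmulticast configuration on success. */
        return rte_eth_dev_start(port_id);
}
#endif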
1757
1758 int
1759 rte_eth_dev_set_link_up(uint16_t port_id)
1760 {
1761         struct rte_eth_dev *dev;
1762
1763         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1764
1765         dev = &rte_eth_devices[port_id];
1766
1767         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1768         return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
1769 }
1770
1771 int
1772 rte_eth_dev_set_link_down(uint16_t port_id)
1773 {
1774         struct rte_eth_dev *dev;
1775
1776         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1777
1778         dev = &rte_eth_devices[port_id];
1779
1780         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1781         return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
1782 }
1783
1784 int
1785 rte_eth_dev_close(uint16_t port_id)
1786 {
1787         struct rte_eth_dev *dev;
1788         int firsterr, binerr;
1789         int *lasterr = &firsterr;
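        /*
         * Editor's note: firsterr records the first failure. If dev_close()
         * fails, lasterr is redirected to the scratch variable binerr so the
         * later rte_eth_dev_release_port() result cannot overwrite that first
         * error, while the release is still attempted.
         */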
1790
1791         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1792         dev = &rte_eth_devices[port_id];
1793
1794         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
1795         *lasterr = (*dev->dev_ops->dev_close)(dev);
1796         if (*lasterr != 0)
1797                 lasterr = &binerr;
1798
1799         rte_ethdev_trace_close(port_id);
1800         *lasterr = rte_eth_dev_release_port(dev);
1801
1802         return eth_err(port_id, firsterr);
1803 }
1804
1805 int
1806 rte_eth_dev_reset(uint16_t port_id)
1807 {
1808         struct rte_eth_dev *dev;
1809         int ret;
1810
1811         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1812         dev = &rte_eth_devices[port_id];
1813
1814         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
1815
1816         ret = rte_eth_dev_stop(port_id);
1817         if (ret != 0) {
1818                 RTE_ETHDEV_LOG(ERR,
1819                         "Failed to stop device (port %u) before reset: %s - ignore\n",
1820                         port_id, rte_strerror(-ret));
1821         }
1822         ret = dev->dev_ops->dev_reset(dev);
1823
1824         return eth_err(port_id, ret);
1825 }
1826
1827 int
1828 rte_eth_dev_is_removed(uint16_t port_id)
1829 {
1830         struct rte_eth_dev *dev;
1831         int ret;
1832
1833         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
1834
1835         dev = &rte_eth_devices[port_id];
1836
1837         if (dev->state == RTE_ETH_DEV_REMOVED)
1838                 return 1;
1839
1840         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);
1841
1842         ret = dev->dev_ops->is_removed(dev);
1843         if (ret != 0)
1844                 /* Device is physically removed. */
1845                 dev->state = RTE_ETH_DEV_REMOVED;
1846
1847         return ret;
1848 }
1849
1850 static int
1851 rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg,
1852                              uint16_t n_seg, uint32_t *mbp_buf_size,
1853                              const struct rte_eth_dev_info *dev_info)
1854 {
1855         const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa;
1856         struct rte_mempool *mp_first;
1857         uint32_t offset_mask;
1858         uint16_t seg_idx;
1859
1860         if (n_seg > seg_capa->max_nseg) {
1861                 RTE_ETHDEV_LOG(ERR,
1862                                "Requested Rx segments %u exceed supported %u\n",
1863                                n_seg, seg_capa->max_nseg);
1864                 return -EINVAL;
1865         }
1866         /*
1867          * Check the sizes and offsets against buffer sizes
1868          * for each segment specified in extended configuration.
1869          */
1870         mp_first = rx_seg[0].mp;
1871         offset_mask = (1u << seg_capa->offset_align_log2) - 1;
1872         for (seg_idx = 0; seg_idx < n_seg; seg_idx++) {
1873                 struct rte_mempool *mpl = rx_seg[seg_idx].mp;
1874                 uint32_t length = rx_seg[seg_idx].length;
1875                 uint32_t offset = rx_seg[seg_idx].offset;
1876
1877                 if (mpl == NULL) {
1878                         RTE_ETHDEV_LOG(ERR, "null mempool pointer\n");
1879                         return -EINVAL;
1880                 }
1881                 if (seg_idx != 0 && mp_first != mpl &&
1882                     seg_capa->multi_pools == 0) {
1883                         RTE_ETHDEV_LOG(ERR, "Receiving to multiple pools is not supported\n");
1884                         return -ENOTSUP;
1885                 }
1886                 if (offset != 0) {
1887                         if (seg_capa->offset_allowed == 0) {
1888                                 RTE_ETHDEV_LOG(ERR, "Rx segmentation with offset is not supported\n");
1889                                 return -ENOTSUP;
1890                         }
1891                         if (offset & offset_mask) {
1892                                 RTE_ETHDEV_LOG(ERR, "Rx segmentation offset %u is not aligned to 2^%u bytes\n",
1893                                                offset,
1894                                                seg_capa->offset_align_log2);
1895                                 return -EINVAL;
1896                         }
1897                 }
1898                 if (mpl->private_data_size <
1899                         sizeof(struct rte_pktmbuf_pool_private)) {
1900                         RTE_ETHDEV_LOG(ERR,
1901                                        "%s private_data_size %u < %u\n",
1902                                        mpl->name, mpl->private_data_size,
1903                                        (unsigned int)sizeof
1904                                         (struct rte_pktmbuf_pool_private));
1905                         return -ENOSPC;
1906                 }
1907                 offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM;
1908                 *mbp_buf_size = rte_pktmbuf_data_room_size(mpl);
1909                 length = length != 0 ? length : *mbp_buf_size;
1910                 if (*mbp_buf_size < length + offset) {
1911                         RTE_ETHDEV_LOG(ERR,
1912                                        "%s mbuf_data_room_size %u < %u (segment length=%u + segment offset=%u)\n",
1913                                        mpl->name, *mbp_buf_size,
1914                                        length + offset, length, offset);
1915                         return -EINVAL;
1916                 }
1917         }
1918         return 0;
1919 }
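/*
 * Editor's note: illustrative sketch only, not part of this file. It builds
 * the buffer-split configuration that the checker above validates: packet
 * headers land in one mempool and the payload in another. The helper name
 * example_rx_split_setup() is hypothetical, and hdr_mp/pay_mp are assumed to
 * have been created elsewhere with rte_pktmbuf_pool_create().
 */
#if 0
static int
example_rx_split_setup(uint16_t port_id, uint16_t queue_id,
                       struct rte_mempool *hdr_mp, struct rte_mempool *pay_mp)
{
        union rte_eth_rxseg rx_useg[2];
        struct rte_eth_rxconf rxconf;

        memset(rx_useg, 0, sizeof(rx_useg));
        memset(&rxconf, 0, sizeof(rxconf));

        /* Segment 0: first 128 bytes (e.g. headers) from hdr_mp. */
        rx_useg[0].split.mp = hdr_mp;
        rx_useg[0].split.length = 128;
        rx_useg[0].split.offset = 0;
        /* Segment 1: remainder from pay_mp; length 0 means "use the
         * pool's full data room", as implemented by the checker above.
         */
        rx_useg[1].split.mp = pay_mp;

        rxconf.rx_seg = rx_useg;
        rxconf.rx_nseg = 2;
        /* The split offload must be requested (per-queue or port-wide). */
        rxconf.offloads = RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;

        /* mp == NULL selects the extended multi-segment path below. */
        return rte_eth_rx_queue_setup(port_id, queue_id, 512,
                                      rte_eth_dev_socket_id(port_id),
                                      &rxconf, NULL);
}
#endif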
1920
1921 int
1922 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1923                        uint16_t nb_rx_desc, unsigned int socket_id,
1924                        const struct rte_eth_rxconf *rx_conf,
1925                        struct rte_mempool *mp)
1926 {
1927         int ret;
1928         uint32_t mbp_buf_size;
1929         struct rte_eth_dev *dev;
1930         struct rte_eth_dev_info dev_info;
1931         struct rte_eth_rxconf local_conf;
1932         void **rxq;
1933
1934         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1935
1936         dev = &rte_eth_devices[port_id];
1937         if (rx_queue_id >= dev->data->nb_rx_queues) {
1938                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
1939                 return -EINVAL;
1940         }
1941
1942         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
1943
1944         ret = rte_eth_dev_info_get(port_id, &dev_info);
1945         if (ret != 0)
1946                 return ret;
1947
1948         if (mp != NULL) {
1949                 /* Single pool configuration check. */
1950                 if (rx_conf != NULL && rx_conf->rx_nseg != 0) {
1951                         RTE_ETHDEV_LOG(ERR,
1952                                        "Ambiguous segment configuration\n");
1953                         return -EINVAL;
1954                 }
1955                 /*
1956                  * Check the size of the mbuf data buffer; this value
1957                  * must be provided in the private data of the memory pool.
1958                  * First check that the memory pool has valid private data.
1959                  */
1960                 if (mp->private_data_size <
1961                                 sizeof(struct rte_pktmbuf_pool_private)) {
1962                         RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n",
1963                                 mp->name, mp->private_data_size,
1964                                 (unsigned int)
1965                                 sizeof(struct rte_pktmbuf_pool_private));
1966                         return -ENOSPC;
1967                 }
1968                 mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1969                 if (mbp_buf_size < dev_info.min_rx_bufsize +
1970                                    RTE_PKTMBUF_HEADROOM) {
1971                         RTE_ETHDEV_LOG(ERR,
1972                                        "%s mbuf_data_room_size %u < %u (RTE_PKTMBUF_HEADROOM=%u + min_rx_bufsize(dev)=%u)\n",
1973                                        mp->name, mbp_buf_size,
1974                                        RTE_PKTMBUF_HEADROOM +
1975                                        dev_info.min_rx_bufsize,
1976                                        RTE_PKTMBUF_HEADROOM,
1977                                        dev_info.min_rx_bufsize);
1978                         return -EINVAL;
1979                 }
1980         } else {
1981                 const struct rte_eth_rxseg_split *rx_seg;
1982                 uint16_t n_seg;
1983
1984                 /* Extended multi-segment configuration check. */
1985                 if (rx_conf == NULL || rx_conf->rx_seg == NULL || rx_conf->rx_nseg == 0) {
1986                         RTE_ETHDEV_LOG(ERR,
1987                                        "Memory pool is null and no extended configuration provided\n");
1988                         return -EINVAL;
1989                 }
1990
1991                 rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg;
1992                 n_seg = rx_conf->rx_nseg;
1993
1994                 if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
1995                         ret = rte_eth_rx_queue_check_split(rx_seg, n_seg,
1996                                                            &mbp_buf_size,
1997                                                            &dev_info);
1998                         if (ret != 0)
1999                                 return ret;
2000                 } else {
2001                         RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n");
2002                         return -EINVAL;
2003                 }
2004         }
2005
2006         /* Use the default specified by the driver if nb_rx_desc is zero */
2007         if (nb_rx_desc == 0) {
2008                 nb_rx_desc = dev_info.default_rxportconf.ring_size;
2009                 /* If driver default is also zero, fall back on EAL default */
2010                 if (nb_rx_desc == 0)
2011                         nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
2012         }
2013
2014         if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
2015                         nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
2016                         nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
2017
2018                 RTE_ETHDEV_LOG(ERR,
2019                         "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
2020                         nb_rx_desc, dev_info.rx_desc_lim.nb_max,
2021                         dev_info.rx_desc_lim.nb_min,
2022                         dev_info.rx_desc_lim.nb_align);
2023                 return -EINVAL;
2024         }
2025
2026         if (dev->data->dev_started &&
2027                 !(dev_info.dev_capa &
2028                         RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
2029                 return -EBUSY;
2030
2031         if (dev->data->dev_started &&
2032                 (dev->data->rx_queue_state[rx_queue_id] !=
2033                         RTE_ETH_QUEUE_STATE_STOPPED))
2034                 return -EBUSY;
2035
2036         rxq = dev->data->rx_queues;
2037         if (rxq[rx_queue_id]) {
2038                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
2039                                         -ENOTSUP);
2040                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
2041                 rxq[rx_queue_id] = NULL;
2042         }
2043
2044         if (rx_conf == NULL)
2045                 rx_conf = &dev_info.default_rxconf;
2046
2047         local_conf = *rx_conf;
2048
2049         /*
2050          * If an offload has already been enabled in
2051          * rte_eth_dev_configure(), it is enabled on all queues,
2052          * so there is no need to enable it for this queue again.
2053          * The local_conf.offloads passed to the underlying PMD only
2054          * carries those offloads that are enabled on this queue alone
2055          * and not on all queues.
2056          */
2057         local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
2058
2059         /*
2060          * Offloads newly added for this queue are those not enabled in
2061          * rte_eth_dev_configure(), and they must be per-queue offloads.
2062          * A pure per-port offload can't be enabled on one queue while
2063          * disabled on another. Nor can a pure per-port offload be newly
2064          * enabled for an individual queue if it wasn't enabled in
2065          * rte_eth_dev_configure().
2066          */
2067         if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
2068              local_conf.offloads) {
2069                 RTE_ETHDEV_LOG(ERR,
2070                         "Ethdev port_id=%d rx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
2071                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2072                         port_id, rx_queue_id, local_conf.offloads,
2073                         dev_info.rx_queue_offload_capa,
2074                         __func__);
2075                 return -EINVAL;
2076         }
2077
2078         /*
2079          * If LRO is enabled, check that the maximum aggregated packet
2080          * size is supported by the configured device.
2081          */
2082         if (local_conf.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
2083                 if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0)
2084                         dev->data->dev_conf.rxmode.max_lro_pkt_size =
2085                                 dev->data->dev_conf.rxmode.max_rx_pkt_len;
2086                 int ret = eth_dev_check_lro_pkt_size(port_id,
2087                                 dev->data->dev_conf.rxmode.max_lro_pkt_size,
2088                                 dev->data->dev_conf.rxmode.max_rx_pkt_len,
2089                                 dev_info.max_lro_pkt_size);
2090                 if (ret != 0)
2091                         return ret;
2092         }
2093
2094         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
2095                                               socket_id, &local_conf, mp);
2096         if (!ret) {
2097                 if (!dev->data->min_rx_buf_size ||
2098                     dev->data->min_rx_buf_size > mbp_buf_size)
2099                         dev->data->min_rx_buf_size = mbp_buf_size;
2100         }
2101
2102         rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp,
2103                 rx_conf, ret);
2104         return eth_err(port_id, ret);
2105 }
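/*
 * Editor's note: illustrative sketch only, not part of this file. The common
 * single-mempool path of rte_eth_rx_queue_setup(); pool sizing numbers are
 * arbitrary examples and the helper name is hypothetical.
 */
#if 0
static int
example_rx_queue_setup(uint16_t port_id, uint16_t queue_id)
{
        struct rte_mempool *mp;

        /* The pool's data room must cover RTE_PKTMBUF_HEADROOM +
         * dev_info.min_rx_bufsize, which the checks above enforce.
         */
        mp = rte_pktmbuf_pool_create("example_rx_pool", 8192, 256, 0,
                                     RTE_MBUF_DEFAULT_BUF_SIZE,
                                     rte_eth_dev_socket_id(port_id));
        if (mp == NULL)
                return -rte_errno;

        /* rx_conf == NULL selects dev_info.default_rxconf above;
         * nb_rx_desc == 0 would select the driver/EAL default ring size.
         */
        return rte_eth_rx_queue_setup(port_id, queue_id, 1024,
                                      rte_eth_dev_socket_id(port_id),
                                      NULL, mp);
}
#endif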
2106
2107 int
2108 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2109                                uint16_t nb_rx_desc,
2110                                const struct rte_eth_hairpin_conf *conf)
2111 {
2112         int ret;
2113         struct rte_eth_dev *dev;
2114         struct rte_eth_hairpin_cap cap;
2115         void **rxq;
2116         int i;
2117         int count;
2118
2119         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2120
2121         dev = &rte_eth_devices[port_id];
2122         if (rx_queue_id >= dev->data->nb_rx_queues) {
2123                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
2124                 return -EINVAL;
2125         }
2126         ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2127         if (ret != 0)
2128                 return ret;
2129         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_hairpin_queue_setup,
2130                                 -ENOTSUP);
2131         /* if nb_rx_desc is zero use max number of desc from the driver. */
2132         if (nb_rx_desc == 0)
2133                 nb_rx_desc = cap.max_nb_desc;
2134         if (nb_rx_desc > cap.max_nb_desc) {
2135                 RTE_ETHDEV_LOG(ERR,
2136                         "Invalid value for nb_rx_desc(=%hu), should be: <= %hu\n",
2137                         nb_rx_desc, cap.max_nb_desc);
2138                 return -EINVAL;
2139         }
2140         if (conf->peer_count > cap.max_rx_2_tx) {
2141                 RTE_ETHDEV_LOG(ERR,
2142                         "Invalid value for number of peers for Rx queue(=%u), should be: <= %hu\n",
2143                         conf->peer_count, cap.max_rx_2_tx);
2144                 return -EINVAL;
2145         }
2146         if (conf->peer_count == 0) {
2147                 RTE_ETHDEV_LOG(ERR,
2148                         "Invalid value for number of peers for Rx queue(=%u), should be: > 0\n",
2149                         conf->peer_count);
2150                 return -EINVAL;
2151         }
2152         for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
2153              cap.max_nb_queues != UINT16_MAX; i++) {
2154                 if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
2155                         count++;
2156         }
2157         if (count > cap.max_nb_queues) {
2158                 RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues, max is %d\n",
2159                                cap.max_nb_queues);
2160                 return -EINVAL;
2161         }
2162         if (dev->data->dev_started)
2163                 return -EBUSY;
2164         rxq = dev->data->rx_queues;
2165         if (rxq[rx_queue_id] != NULL) {
2166                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
2167                                         -ENOTSUP);
2168                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
2169                 rxq[rx_queue_id] = NULL;
2170         }
2171         ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
2172                                                       nb_rx_desc, conf);
2173         if (ret == 0)
2174                 dev->data->rx_queue_state[rx_queue_id] =
2175                         RTE_ETH_QUEUE_STATE_HAIRPIN;
2176         return eth_err(port_id, ret);
2177 }
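/*
 * Editor's note: illustrative sketch only, not part of this file. A minimal
 * same-port hairpin pair: Rx queue rxq is peered with Tx queue txq so traffic
 * loops through the NIC without touching the CPU. The helper name is
 * hypothetical; it also uses the Tx counterpart defined further below.
 */
#if 0
static int
example_hairpin_pair(uint16_t port_id, uint16_t rxq, uint16_t txq)
{
        struct rte_eth_hairpin_conf conf;
        int ret;

        memset(&conf, 0, sizeof(conf));
        conf.peer_count = 1;
        conf.peers[0].port = port_id;   /* same-port hairpin */
        conf.peers[0].queue = txq;
        /* nb_desc == 0 selects cap.max_nb_desc, as checked above. */
        ret = rte_eth_rx_hairpin_queue_setup(port_id, rxq, 0, &conf);
        if (ret != 0)
                return ret;

        conf.peers[0].queue = rxq;
        return rte_eth_tx_hairpin_queue_setup(port_id, txq, 0, &conf);
}
#endif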
2178
2179 int
2180 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2181                        uint16_t nb_tx_desc, unsigned int socket_id,
2182                        const struct rte_eth_txconf *tx_conf)
2183 {
2184         struct rte_eth_dev *dev;
2185         struct rte_eth_dev_info dev_info;
2186         struct rte_eth_txconf local_conf;
2187         void **txq;
2188         int ret;
2189
2190         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2191
2192         dev = &rte_eth_devices[port_id];
2193         if (tx_queue_id >= dev->data->nb_tx_queues) {
2194                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
2195                 return -EINVAL;
2196         }
2197
2198         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
2199
2200         ret = rte_eth_dev_info_get(port_id, &dev_info);
2201         if (ret != 0)
2202                 return ret;
2203
2204         /* Use the default specified by the driver if nb_tx_desc is zero */
2205         if (nb_tx_desc == 0) {
2206                 nb_tx_desc = dev_info.default_txportconf.ring_size;
2207                 /* If driver default is zero, fall back on EAL default */
2208                 if (nb_tx_desc == 0)
2209                         nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
2210         }
2211         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
2212             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
2213             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
2214                 RTE_ETHDEV_LOG(ERR,
2215                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
2216                         nb_tx_desc, dev_info.tx_desc_lim.nb_max,
2217                         dev_info.tx_desc_lim.nb_min,
2218                         dev_info.tx_desc_lim.nb_align);
2219                 return -EINVAL;
2220         }
2221
2222         if (dev->data->dev_started &&
2223                 !(dev_info.dev_capa &
2224                         RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
2225                 return -EBUSY;
2226
2227         if (dev->data->dev_started &&
2228                 (dev->data->tx_queue_state[tx_queue_id] !=
2229                         RTE_ETH_QUEUE_STATE_STOPPED))
2230                 return -EBUSY;
2231
2232         txq = dev->data->tx_queues;
2233         if (txq[tx_queue_id]) {
2234                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
2235                                         -ENOTSUP);
2236                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
2237                 txq[tx_queue_id] = NULL;
2238         }
2239
2240         if (tx_conf == NULL)
2241                 tx_conf = &dev_info.default_txconf;
2242
2243         local_conf = *tx_conf;
2244
2245         /*
2246          * If an offload has already been enabled in
2247          * rte_eth_dev_configure(), it is enabled on all queues,
2248          * so there is no need to enable it for this queue again.
2249          * The local_conf.offloads passed to the underlying PMD only
2250          * carries those offloads that are enabled on this queue alone
2251          * and not on all queues.
2252          */
2253         local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
2254
2255         /*
2256          * Offloads newly added for this queue are those not enabled in
2257          * rte_eth_dev_configure(), and they must be per-queue offloads.
2258          * A pure per-port offload can't be enabled on one queue while
2259          * disabled on another. Nor can a pure per-port offload be newly
2260          * enabled for an individual queue if it wasn't enabled in
2261          * rte_eth_dev_configure().
2262          */
2263         if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
2264              local_conf.offloads) {
2265                 RTE_ETHDEV_LOG(ERR,
2266                         "Ethdev port_id=%d tx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
2267                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2268                         port_id, tx_queue_id, local_conf.offloads,
2269                         dev_info.tx_queue_offload_capa,
2270                         __func__);
2271                 return -EINVAL;
2272         }
2273
2274         rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf);
2275         return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
2276                        tx_queue_id, nb_tx_desc, socket_id, &local_conf));
2277 }
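/*
 * Editor's note: illustrative sketch only, not part of this file. It shows
 * the per-queue offload rule documented above: an offload not enabled
 * port-wide in rte_eth_dev_configure() may be enabled on a single queue only
 * if it appears in tx_queue_offload_capa. The helper name is hypothetical.
 */
#if 0
static int
example_tx_queue_with_offload(uint16_t port_id, uint16_t queue_id)
{
        struct rte_eth_dev_info dev_info;
        struct rte_eth_txconf txconf;
        int ret;

        ret = rte_eth_dev_info_get(port_id, &dev_info);
        if (ret != 0)
                return ret;

        txconf = dev_info.default_txconf;
        if (dev_info.tx_queue_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
                /* Enabled only on this queue, not port-wide. */
                txconf.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;

        return rte_eth_tx_queue_setup(port_id, queue_id, 1024,
                                      rte_eth_dev_socket_id(port_id),
                                      &txconf);
}
#endif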
2278
2279 int
2280 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2281                                uint16_t nb_tx_desc,
2282                                const struct rte_eth_hairpin_conf *conf)
2283 {
2284         struct rte_eth_dev *dev;
2285         struct rte_eth_hairpin_cap cap;
2286         void **txq;
2287         int i;
2288         int count;
2289         int ret;
2290
2291         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2292         dev = &rte_eth_devices[port_id];
2293         if (tx_queue_id >= dev->data->nb_tx_queues) {
2294                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
2295                 return -EINVAL;
2296         }
2297         ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2298         if (ret != 0)
2299                 return ret;
2300         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup,
2301                                 -ENOTSUP);
2302         /* if nb_tx_desc is zero use max number of desc from the driver. */
2303         if (nb_tx_desc == 0)
2304                 nb_tx_desc = cap.max_nb_desc;
2305         if (nb_tx_desc > cap.max_nb_desc) {
2306                 RTE_ETHDEV_LOG(ERR,
2307                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu\n",
2308                         nb_tx_desc, cap.max_nb_desc);
2309                 return -EINVAL;
2310         }
2311         if (conf->peer_count > cap.max_tx_2_rx) {
2312                 RTE_ETHDEV_LOG(ERR,
2313                         "Invalid value for number of peers for Tx queue(=%u), should be: <= %hu\n",
2314                         conf->peer_count, cap.max_tx_2_rx);
2315                 return -EINVAL;
2316         }
2317         if (conf->peer_count == 0) {
2318                 RTE_ETHDEV_LOG(ERR,
2319                         "Invalid value for number of peers for Tx queue(=%u), should be: > 0\n",
2320                         conf->peer_count);
2321                 return -EINVAL;
2322         }
2323         for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
2324              cap.max_nb_queues != UINT16_MAX; i++) {
2325                 if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
2326                         count++;
2327         }
2328         if (count > cap.max_nb_queues) {
2329                 RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues, max is %d\n",
2330                                cap.max_nb_queues);
2331                 return -EINVAL;
2332         }
2333         if (dev->data->dev_started)
2334                 return -EBUSY;
2335         txq = dev->data->tx_queues;
2336         if (txq[tx_queue_id] != NULL) {
2337                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
2338                                         -ENOTSUP);
2339                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
2340                 txq[tx_queue_id] = NULL;
2341         }
2342         ret = (*dev->dev_ops->tx_hairpin_queue_setup)
2343                 (dev, tx_queue_id, nb_tx_desc, conf);
2344         if (ret == 0)
2345                 dev->data->tx_queue_state[tx_queue_id] =
2346                         RTE_ETH_QUEUE_STATE_HAIRPIN;
2347         return eth_err(port_id, ret);
2348 }
2349
2350 int
2351 rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
2352 {
2353         struct rte_eth_dev *dev;
2354         int ret;
2355
2356         RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2357         dev = &rte_eth_devices[tx_port];
2358         if (dev->data->dev_started == 0) {
2359                 RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port);
2360                 return -EBUSY;
2361         }
2362
2363         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_bind, -ENOTSUP);
2364         ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port);
2365         if (ret != 0)
2366                 RTE_ETHDEV_LOG(ERR, "Failed to bind hairpin Tx %d"
2367                                " to Rx %d (%d - all ports)\n",
2368                                tx_port, rx_port, RTE_MAX_ETHPORTS);
2369
2370         return ret;
2371 }
2372
2373 int
2374 rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
2375 {
2376         struct rte_eth_dev *dev;
2377         int ret;
2378
2379         RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2380         dev = &rte_eth_devices[tx_port];
2381         if (dev->data->dev_started == 0) {
2382                 RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port);
2383                 return -EBUSY;
2384         }
2385
2386         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_unbind, -ENOTSUP);
2387         ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port);
2388         if (ret != 0)
2389                 RTE_ETHDEV_LOG(ERR, "Failed to unbind hairpin Tx %d"
2390                                " from Rx %d (%d - all ports)\n",
2391                                tx_port, rx_port, RTE_MAX_ETHPORTS);
2392
2393         return ret;
2394 }
2395
2396 int
2397 rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
2398                                size_t len, uint32_t direction)
2399 {
2400         struct rte_eth_dev *dev;
2401         int ret;
2402
2403         if (peer_ports == NULL || len == 0)
2404                 return -EINVAL;
2405
2406         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2407         dev = &rte_eth_devices[port_id];
2408         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_get_peer_ports,
2409                                 -ENOTSUP);
2410
2411         ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports,
2412                                                       len, direction);
2413         if (ret < 0)
2414                 RTE_ETHDEV_LOG(ERR, "Failed to get port %d hairpin peer %s ports\n",
2415                                port_id, direction ? "Rx" : "Tx");
2416
2417         return ret;
2418 }
2419
2420 void
2421 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
2422                 void *userdata __rte_unused)
2423 {
2424         rte_pktmbuf_free_bulk(pkts, unsent);
2425 }
2426
2427 void
2428 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
2429                 void *userdata)
2430 {
2431         uint64_t *count = userdata;
2432
2433         rte_pktmbuf_free_bulk(pkts, unsent);
2434         *count += unsent;
2435 }
2436
2437 int
2438 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
2439                 buffer_tx_error_fn cbfn, void *userdata)
2440 {
2441         buffer->error_callback = cbfn;
2442         buffer->error_userdata = userdata;
2443         return 0;
2444 }
2445
2446 int
2447 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
2448 {
2449         int ret = 0;
2450
2451         if (buffer == NULL)
2452                 return -EINVAL;
2453
2454         buffer->size = size;
2455         if (buffer->error_callback == NULL) {
2456                 ret = rte_eth_tx_buffer_set_err_callback(
2457                         buffer, rte_eth_tx_buffer_drop_callback, NULL);
2458         }
2459
2460         return ret;
2461 }
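/*
 * Editor's note: illustrative sketch only, not part of this file. The usual
 * rte_eth_dev_tx_buffer setup: allocate with the size macro, initialize, and
 * install the counting error callback defined above so drops are tallied
 * instead of only freed. The helper name is hypothetical.
 */
#if 0
static struct rte_eth_dev_tx_buffer *
example_tx_buffer_create(uint64_t *drop_counter)
{
        struct rte_eth_dev_tx_buffer *buffer;

        buffer = rte_zmalloc("example_tx_buffer",
                             RTE_ETH_TX_BUFFER_SIZE(32), 0);
        if (buffer == NULL)
                return NULL;

        /* Installs rte_eth_tx_buffer_drop_callback by default;
         * returns 0 here since buffer is non-NULL.
         */
        rte_eth_tx_buffer_init(buffer, 32);
        rte_eth_tx_buffer_set_err_callback(buffer,
                        rte_eth_tx_buffer_count_callback, drop_counter);
        return buffer;
}
#endif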
2462
2463 int
2464 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
2465 {
2466         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2467         int ret;
2468
2469         /* Validate port and op support; note queue_id is not range-checked here. */
2470         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2471         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
2472
2473         /* Call driver to free pending mbufs. */
2474         ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
2475                                                free_cnt);
2476         return eth_err(port_id, ret);
2477 }
2478
2479 int
2480 rte_eth_promiscuous_enable(uint16_t port_id)
2481 {
2482         struct rte_eth_dev *dev;
2483         int diag = 0;
2484
2485         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2486         dev = &rte_eth_devices[port_id];
2487
2488         if (dev->data->promiscuous == 1)
2489                 return 0;
2490
2491         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP);
2492
2493         diag = (*dev->dev_ops->promiscuous_enable)(dev);
2494         dev->data->promiscuous = (diag == 0) ? 1 : 0;
2495
2496         return eth_err(port_id, diag);
2497 }
2498
2499 int
2500 rte_eth_promiscuous_disable(uint16_t port_id)
2501 {
2502         struct rte_eth_dev *dev;
2503         int diag = 0;
2504
2505         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2506         dev = &rte_eth_devices[port_id];
2507
2508         if (dev->data->promiscuous == 0)
2509                 return 0;
2510
2511         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP);
2512
2513         dev->data->promiscuous = 0;
2514         diag = (*dev->dev_ops->promiscuous_disable)(dev);
2515         if (diag != 0)
2516                 dev->data->promiscuous = 1;
2517
2518         return eth_err(port_id, diag);
2519 }
2520
2521 int
2522 rte_eth_promiscuous_get(uint16_t port_id)
2523 {
2524         struct rte_eth_dev *dev;
2525
2526         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2527
2528         dev = &rte_eth_devices[port_id];
2529         return dev->data->promiscuous;
2530 }
2531
2532 int
2533 rte_eth_allmulticast_enable(uint16_t port_id)
2534 {
2535         struct rte_eth_dev *dev;
2536         int diag;
2537
2538         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2539         dev = &rte_eth_devices[port_id];
2540
2541         if (dev->data->all_multicast == 1)
2542                 return 0;
2543
2544         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP);
2545         diag = (*dev->dev_ops->allmulticast_enable)(dev);
2546         dev->data->all_multicast = (diag == 0) ? 1 : 0;
2547
2548         return eth_err(port_id, diag);
2549 }
2550
2551 int
2552 rte_eth_allmulticast_disable(uint16_t port_id)
2553 {
2554         struct rte_eth_dev *dev;
2555         int diag;
2556
2557         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2558         dev = &rte_eth_devices[port_id];
2559
2560         if (dev->data->all_multicast == 0)
2561                 return 0;
2562
2563         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP);
2564         dev->data->all_multicast = 0;
2565         diag = (*dev->dev_ops->allmulticast_disable)(dev);
2566         if (diag != 0)
2567                 dev->data->all_multicast = 1;
2568
2569         return eth_err(port_id, diag);
2570 }
2571
2572 int
2573 rte_eth_allmulticast_get(uint16_t port_id)
2574 {
2575         struct rte_eth_dev *dev;
2576
2577         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2578
2579         dev = &rte_eth_devices[port_id];
2580         return dev->data->all_multicast;
2581 }
2582
2583 int
2584 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
2585 {
2586         struct rte_eth_dev *dev;
2587
2588         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2589         dev = &rte_eth_devices[port_id];
2590
2591         if (dev->data->dev_conf.intr_conf.lsc &&
2592             dev->data->dev_started)
2593                 rte_eth_linkstatus_get(dev, eth_link);
2594         else {
2595                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2596                 (*dev->dev_ops->link_update)(dev, 1);
2597                 *eth_link = dev->data->dev_link;
2598         }
2599
2600         return 0;
2601 }
2602
2603 int
2604 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
2605 {
2606         struct rte_eth_dev *dev;
2607
2608         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2609         dev = &rte_eth_devices[port_id];
2610
2611         if (dev->data->dev_conf.intr_conf.lsc &&
2612             dev->data->dev_started)
2613                 rte_eth_linkstatus_get(dev, eth_link);
2614         else {
2615                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2616                 (*dev->dev_ops->link_update)(dev, 0);
2617                 *eth_link = dev->data->dev_link;
2618         }
2619
2620         return 0;
2621 }
2622
2623 const char *
2624 rte_eth_link_speed_to_str(uint32_t link_speed)
2625 {
2626         switch (link_speed) {
2627         case ETH_SPEED_NUM_NONE: return "None";
2628         case ETH_SPEED_NUM_10M:  return "10 Mbps";
2629         case ETH_SPEED_NUM_100M: return "100 Mbps";
2630         case ETH_SPEED_NUM_1G:   return "1 Gbps";
2631         case ETH_SPEED_NUM_2_5G: return "2.5 Gbps";
2632         case ETH_SPEED_NUM_5G:   return "5 Gbps";
2633         case ETH_SPEED_NUM_10G:  return "10 Gbps";
2634         case ETH_SPEED_NUM_20G:  return "20 Gbps";
2635         case ETH_SPEED_NUM_25G:  return "25 Gbps";
2636         case ETH_SPEED_NUM_40G:  return "40 Gbps";
2637         case ETH_SPEED_NUM_50G:  return "50 Gbps";
2638         case ETH_SPEED_NUM_56G:  return "56 Gbps";
2639         case ETH_SPEED_NUM_100G: return "100 Gbps";
2640         case ETH_SPEED_NUM_200G: return "200 Gbps";
2641         case ETH_SPEED_NUM_UNKNOWN: return "Unknown";
2642         default: return "Invalid";
2643         }
2644 }
2645
2646 int
2647 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
2648 {
2649         if (eth_link->link_status == ETH_LINK_DOWN)
2650                 return snprintf(str, len, "Link down");
2651         else
2652                 return snprintf(str, len, "Link up at %s %s %s",
2653                         rte_eth_link_speed_to_str(eth_link->link_speed),
2654                         (eth_link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
2655                         "FDX" : "HDX",
2656                         (eth_link->link_autoneg == ETH_LINK_AUTONEG) ?
2657                         "Autoneg" : "Fixed");
2658 }
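/*
 * Editor's note: illustrative sketch only, not part of this file. A typical
 * non-blocking link status query using the two helpers above; the function
 * name is hypothetical.
 */
#if 0
static void
example_print_link(uint16_t port_id)
{
        char text[RTE_ETH_LINK_MAX_STR_LEN];
        struct rte_eth_link link;

        if (rte_eth_link_get_nowait(port_id, &link) != 0)
                return;
        /* e.g. "Link up at 10 Gbps FDX Autoneg" or "Link down" */
        rte_eth_link_to_str(text, sizeof(text), &link);
        printf("Port %u: %s\n", port_id, text);
}
#endif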
2659
2660 int
2661 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
2662 {
2663         struct rte_eth_dev *dev;
2664
2665         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2666
2667         dev = &rte_eth_devices[port_id];
2668         memset(stats, 0, sizeof(*stats));
2669
2670         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
2671         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
2672         return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
2673 }
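/*
 * Editor's note: illustrative sketch only, not part of this file. A basic
 * counter dump via rte_eth_stats_get(); only a few rte_eth_stats fields are
 * shown and the helper name is hypothetical.
 */
#if 0
static void
example_dump_stats(uint16_t port_id)
{
        struct rte_eth_stats stats;

        if (rte_eth_stats_get(port_id, &stats) != 0)
                return;
        printf("port %u: rx %"PRIu64" tx %"PRIu64" missed %"PRIu64
               " rx_nombuf %"PRIu64"\n",
               port_id, stats.ipackets, stats.opackets,
               stats.imissed, stats.rx_nombuf);
}
#endif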
2674
2675 int
2676 rte_eth_stats_reset(uint16_t port_id)
2677 {
2678         struct rte_eth_dev *dev;
2679         int ret;
2680
2681         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2682         dev = &rte_eth_devices[port_id];
2683
2684         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
2685         ret = (*dev->dev_ops->stats_reset)(dev);
2686         if (ret != 0)
2687                 return eth_err(port_id, ret);
2688
2689         dev->data->rx_mbuf_alloc_failed = 0;
2690
2691         return 0;
2692 }
2693
2694 static inline int
2695 eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev)
2696 {
2697         uint16_t nb_rxqs, nb_txqs;
2698         int count;
2699
2700         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2701         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2702
2703         count = RTE_NB_STATS;
2704         if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) {
2705                 count += nb_rxqs * RTE_NB_RXQ_STATS;
2706                 count += nb_txqs * RTE_NB_TXQ_STATS;
2707         }
2708
2709         return count;
2710 }
2711
2712 static int
2713 eth_dev_get_xstats_count(uint16_t port_id)
2714 {
2715         struct rte_eth_dev *dev;
2716         int count;
2717
2718         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2719         dev = &rte_eth_devices[port_id];
2720         if (dev->dev_ops->xstats_get_names_by_id != NULL) {
2721                 count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
2722                                 NULL, 0);
2723                 if (count < 0)
2724                         return eth_err(port_id, count);
2725         }
2726         if (dev->dev_ops->xstats_get_names != NULL) {
2727                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
2728                 if (count < 0)
2729                         return eth_err(port_id, count);
2730         } else
2731                 count = 0;
2732
2733
2734         count += eth_dev_get_xstats_basic_count(dev);
2735
2736         return count;
2737 }
2738
2739 int
2740 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
2741                 uint64_t *id)
2742 {
2743         int cnt_xstats, idx_xstat;
2744
2745         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2746
2747         if (!id) {
2748                 RTE_ETHDEV_LOG(ERR, "Id pointer is NULL\n");
2749                 return -EINVAL;
2750         }
2751
2752         if (!xstat_name) {
2753                 RTE_ETHDEV_LOG(ERR, "xstat_name pointer is NULL\n");
2754                 return -EINVAL;
2755         }
2756
2757         /* Get count */
2758         cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
2759         if (cnt_xstats < 0) {
2760                 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n");
2761                 return -ENODEV;
2762         }
2763
2764         /* Get id-name lookup table */
2765         struct rte_eth_xstat_name xstats_names[cnt_xstats];
2766
2767         if (cnt_xstats != rte_eth_xstats_get_names_by_id(
2768                         port_id, xstats_names, cnt_xstats, NULL)) {
2769                 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n");
2770                 return -1;
2771         }
2772
2773         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
2774                 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
2775                         *id = idx_xstat;
2776                         return 0;
2777                 }
2778         }
2779
2780         return -EINVAL;
2781 }
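/*
 * Editor's note: illustrative sketch only, not part of this file. The
 * id-by-name lookup above paired with rte_eth_xstats_get_by_id() reads one
 * extended counter; "rx_good_packets" is one of the basic stat names
 * registered in this file, and the helper name is hypothetical.
 */
#if 0
static int
example_read_xstat(uint16_t port_id, uint64_t *value)
{
        uint64_t id;
        int ret;

        ret = rte_eth_xstats_get_id_by_name(port_id, "rx_good_packets", &id);
        if (ret != 0)
                return ret;
        ret = rte_eth_xstats_get_by_id(port_id, &id, value, 1);
        if (ret < 0)
                return ret;
        return ret == 1 ? 0 : -EIO;     /* expect exactly one value back */
}
#endif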
2782
2783 /* retrieve basic stats names */
2784 static int
2785 eth_basic_stats_get_names(struct rte_eth_dev *dev,
2786         struct rte_eth_xstat_name *xstats_names)
2787 {
2788         int cnt_used_entries = 0;
2789         uint32_t idx, id_queue;
2790         uint16_t num_q;
2791
2792         for (idx = 0; idx < RTE_NB_STATS; idx++) {
2793                 strlcpy(xstats_names[cnt_used_entries].name,
2794                         eth_dev_stats_strings[idx].name,
2795                         sizeof(xstats_names[0].name));
2796                 cnt_used_entries++;
2797         }
2798
2799         if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
2800                 return cnt_used_entries;
2801
2802         num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2803         for (id_queue = 0; id_queue < num_q; id_queue++) {
2804                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
2805                         snprintf(xstats_names[cnt_used_entries].name,
2806                                 sizeof(xstats_names[0].name),
2807                                 "rx_q%u_%s",
2808                                 id_queue, eth_dev_rxq_stats_strings[idx].name);
2809                         cnt_used_entries++;
2810                 }
2811
2812         }
2813         num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2814         for (id_queue = 0; id_queue < num_q; id_queue++) {
2815                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
2816                         snprintf(xstats_names[cnt_used_entries].name,
2817                                 sizeof(xstats_names[0].name),
2818                                 "tx_q%u_%s",
2819                                 id_queue, eth_dev_txq_stats_strings[idx].name);
2820                         cnt_used_entries++;
2821                 }
2822         }
2823         return cnt_used_entries;
2824 }
2825
2826 /* retrieve ethdev extended statistics names */
2827 int
2828 rte_eth_xstats_get_names_by_id(uint16_t port_id,
2829         struct rte_eth_xstat_name *xstats_names, unsigned int size,
2830         uint64_t *ids)
2831 {
2832         struct rte_eth_xstat_name *xstats_names_copy;
2833         unsigned int no_basic_stat_requested = 1;
2834         unsigned int no_ext_stat_requested = 1;
2835         unsigned int expected_entries;
2836         unsigned int basic_count;
2837         struct rte_eth_dev *dev;
2838         unsigned int i;
2839         int ret;
2840
2841         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2842         dev = &rte_eth_devices[port_id];
2843
2844         basic_count = eth_dev_get_xstats_basic_count(dev);
2845         ret = eth_dev_get_xstats_count(port_id);
2846         if (ret < 0)
2847                 return ret;
2848         expected_entries = (unsigned int)ret;
2849
2850         /* Return max number of stats if no ids given */
2851         if (!ids) {
2852                 if (!xstats_names)
2853                         return expected_entries;
2854                 else if (size < expected_entries)
2855                         return expected_entries;
2856         }
2857
2858         if (ids && !xstats_names)
2859                 return -EINVAL;
2860
2861         if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
2862                 uint64_t ids_copy[size];
2863
2864                 for (i = 0; i < size; i++) {
2865                         if (ids[i] < basic_count) {
2866                                 no_basic_stat_requested = 0;
2867                                 break;
2868                         }
2869
2870                         /*
2871                          * Convert ids to the xstats ids the PMD knows;
2872                          * ids seen by the user are basic + extended stats.
2873                          */
2874                         ids_copy[i] = ids[i] - basic_count;
2875                 }
2876
2877                 if (no_basic_stat_requested)
2878                         return (*dev->dev_ops->xstats_get_names_by_id)(dev,
2879                                         xstats_names, ids_copy, size);
2880         }
2881
2882         /* Retrieve all stats */
2883         if (!ids) {
2884                 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
2885                                 expected_entries);
2886                 if (num_stats < 0 || num_stats > (int)expected_entries)
2887                         return num_stats;
2888                 else
2889                         return expected_entries;
2890         }
2891
2892         xstats_names_copy = calloc(expected_entries,
2893                 sizeof(struct rte_eth_xstat_name));
2894
2895         if (!xstats_names_copy) {
2896                 RTE_ETHDEV_LOG(ERR, "Can't allocate memory for xstats names\n");
2897                 return -ENOMEM;
2898         }
2899
2900         if (ids) {
2901                 for (i = 0; i < size; i++) {
2902                         if (ids[i] >= basic_count) {
2903                                 no_ext_stat_requested = 0;
2904                                 break;
2905                         }
2906                 }
2907         }
2908
2909         /* Fill xstats_names_copy structure */
2910         if (ids && no_ext_stat_requested) {
2911                 eth_basic_stats_get_names(dev, xstats_names_copy);
2912         } else {
2913                 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
2914                         expected_entries);
2915                 if (ret < 0) {
2916                         free(xstats_names_copy);
2917                         return ret;
2918                 }
2919         }
2920
2921         /* Filter stats */
2922         for (i = 0; i < size; i++) {
2923                 if (ids[i] >= expected_entries) {
2924                         RTE_ETHDEV_LOG(ERR, "Id value %" PRIu64 " isn't valid\n", ids[i]);
2925                         free(xstats_names_copy);
2926                         return -1;
2927                 }
2928                 xstats_names[i] = xstats_names_copy[ids[i]];
2929         }
2930
2931         free(xstats_names_copy);
2932         return size;
2933 }
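
/*
 * Usage sketch (illustrative only; "port" and the chosen ids are
 * assumptions, not part of this file): the by-id variant is normally
 * driven in two steps, first querying the total count with ids == NULL,
 * then fetching the names for the ids of interest.
 *
 *	int n = rte_eth_xstats_get_names_by_id(port, NULL, 0, NULL);
 *
 *	if (n > 1) {
 *		uint64_t ids[2] = {0, 1};
 *		struct rte_eth_xstat_name names[2];
 *
 *		rte_eth_xstats_get_names_by_id(port, names, 2, ids);
 *	}
 */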
2934
2935 int
2936 rte_eth_xstats_get_names(uint16_t port_id,
2937         struct rte_eth_xstat_name *xstats_names,
2938         unsigned int size)
2939 {
2940         struct rte_eth_dev *dev;
2941         int cnt_used_entries;
2942         int cnt_expected_entries;
2943         int cnt_driver_entries;
2944
2945         cnt_expected_entries = eth_dev_get_xstats_count(port_id);
2946         if (xstats_names == NULL || cnt_expected_entries < 0 ||
2947                         (int)size < cnt_expected_entries)
2948                 return cnt_expected_entries;
2949
2950         /* port_id checked in eth_dev_get_xstats_count() */
2951         dev = &rte_eth_devices[port_id];
2952
2953         cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names);
2954
2955         if (dev->dev_ops->xstats_get_names != NULL) {
2956                 /* If there are any driver-specific xstats, append them
2957                  * to the end of the list.
2958                  */
2959                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
2960                         dev,
2961                         xstats_names + cnt_used_entries,
2962                         size - cnt_used_entries);
2963                 if (cnt_driver_entries < 0)
2964                         return eth_err(port_id, cnt_driver_entries);
2965                 cnt_used_entries += cnt_driver_entries;
2966         }
2967
2968         return cnt_used_entries;
2969 }
2970
2971
2972 static int
2973 eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
2974 {
2975         struct rte_eth_dev *dev;
2976         struct rte_eth_stats eth_stats;
2977         unsigned int count = 0, i, q;
2978         uint64_t val, *stats_ptr;
2979         uint16_t nb_rxqs, nb_txqs;
2980         int ret;
2981
2982         ret = rte_eth_stats_get(port_id, &eth_stats);
2983         if (ret < 0)
2984                 return ret;
2985
2986         dev = &rte_eth_devices[port_id];
2987
2988         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2989         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2990
2991         /* global stats */
2992         for (i = 0; i < RTE_NB_STATS; i++) {
2993                 stats_ptr = RTE_PTR_ADD(&eth_stats,
2994                                         eth_dev_stats_strings[i].offset);
2995                 val = *stats_ptr;
2996                 xstats[count++].value = val;
2997         }
2998
2999         if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
3000                 return count;
3001
3002         /* per-rxq stats */
3003         for (q = 0; q < nb_rxqs; q++) {
3004                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
3005                         stats_ptr = RTE_PTR_ADD(&eth_stats,
3006                                         eth_dev_rxq_stats_strings[i].offset +
3007                                         q * sizeof(uint64_t));
3008                         val = *stats_ptr;
3009                         xstats[count++].value = val;
3010                 }
3011         }
3012
3013         /* per-txq stats */
3014         for (q = 0; q < nb_txqs; q++) {
3015                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
3016                         stats_ptr = RTE_PTR_ADD(&eth_stats,
3017                                         eth_dev_txq_stats_strings[i].offset +
3018                                         q * sizeof(uint64_t));
3019                         val = *stats_ptr;
3020                         xstats[count++].value = val;
3021                 }
3022         }
3023         return count;
3024 }
3025
3026 /* retrieve ethdev extended statistics */
3027 int
3028 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
3029                          uint64_t *values, unsigned int size)
3030 {
3031         unsigned int no_basic_stat_requested = 1;
3032         unsigned int no_ext_stat_requested = 1;
3033         unsigned int num_xstats_filled;
3034         unsigned int basic_count;
3035         uint16_t expected_entries;
3036         struct rte_eth_dev *dev;
3037         unsigned int i;
3038         int ret;
3039
3040         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3041         ret = eth_dev_get_xstats_count(port_id);
3042         if (ret < 0)
3043                 return ret;
3044         expected_entries = (uint16_t)ret;
3045         struct rte_eth_xstat xstats[expected_entries];
3046         dev = &rte_eth_devices[port_id];
3047         basic_count = eth_dev_get_xstats_basic_count(dev);
3048
3049         /* Return max number of stats if no ids given */
3050         if (!ids) {
3051                 if (!values)
3052                         return expected_entries;
3053                 else if (size < expected_entries)
3054                         return expected_entries;
3055         }
3056
3057         if (ids && !values)
3058                 return -EINVAL;
3059
3060         if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
3061                 /* reuse basic_count computed above for the basic stats */
3062                 uint64_t ids_copy[size];
3063
3064                 for (i = 0; i < size; i++) {
3065                         if (ids[i] < basic_count) {
3066                                 no_basic_stat_requested = 0;
3067                                 break;
3068                         }
3069
3070                         /*
3071                          * Convert ids to xstats ids that PMD knows.
3072                          * ids known by user are basic + extended stats.
3073                          */
3074                         ids_copy[i] = ids[i] - basic_count;
3075                 }
3076
3077                 if (no_basic_stat_requested)
3078                         return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
3079                                         values, size);
3080         }
3081
3082         if (ids) {
3083                 for (i = 0; i < size; i++) {
3084                         if (ids[i] >= basic_count) {
3085                                 no_ext_stat_requested = 0;
3086                                 break;
3087                         }
3088                 }
3089         }
3090
3091         /* Fill the xstats structure */
3092         if (ids && no_ext_stat_requested)
3093                 ret = eth_basic_stats_get(port_id, xstats);
3094         else
3095                 ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
3096
3097         if (ret < 0)
3098                 return ret;
3099         num_xstats_filled = (unsigned int)ret;
3100
3101         /* Return all stats */
3102         if (!ids) {
3103                 for (i = 0; i < num_xstats_filled; i++)
3104                         values[i] = xstats[i].value;
3105                 return expected_entries;
3106         }
3107
3108         /* Filter stats */
3109         for (i = 0; i < size; i++) {
3110                 if (ids[i] >= expected_entries) {
3111                         RTE_ETHDEV_LOG(ERR, "Id value %" PRIu64 " isn't valid\n", ids[i]);
3112                         return -1;
3113                 }
3114                 values[i] = xstats[ids[i]].value;
3115         }
3116         return size;
3117 }
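
/*
 * Usage sketch (illustrative; "port" is an assumption): the ids index
 * the combined basic + driver xstats space, matching the names
 * returned by rte_eth_xstats_get_names_by_id() above.
 *
 *	uint64_t ids[2] = {0, 1};
 *	uint64_t values[2];
 *
 *	if (rte_eth_xstats_get_by_id(port, ids, values, 2) == 2)
 *		printf("xstat[0] = %" PRIu64 "\n", values[0]);
 */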
3118
3119 int
3120 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
3121         unsigned int n)
3122 {
3123         struct rte_eth_dev *dev;
3124         unsigned int count = 0, i;
3125         signed int xcount = 0;
3126         uint16_t nb_rxqs, nb_txqs;
3127         int ret;
3128
3129         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3130
3131         dev = &rte_eth_devices[port_id];
3132
3133         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3134         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3135
3136         /* Return generic statistics */
3137         count = RTE_NB_STATS;
3138         if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS)
3139                 count += (nb_rxqs * RTE_NB_RXQ_STATS) + (nb_txqs * RTE_NB_TXQ_STATS);
3140
3141         /* implemented by the driver */
3142         if (dev->dev_ops->xstats_get != NULL) {
3143                 /* Let the driver append its own xstats at the end of
3144                  * the xstats array.
3145                  */
3146                 xcount = (*dev->dev_ops->xstats_get)(dev,
3147                                      xstats ? xstats + count : NULL,
3148                                      (n > count) ? n - count : 0);
3149
3150                 if (xcount < 0)
3151                         return eth_err(port_id, xcount);
3152         }
3153
3154         if (n < count + xcount || xstats == NULL)
3155                 return count + xcount;
3156
3157         /* now fill the xstats structure */
3158         ret = eth_basic_stats_get(port_id, xstats);
3159         if (ret < 0)
3160                 return ret;
3161         count = ret;
3162
3163         for (i = 0; i < count; i++)
3164                 xstats[i].id = i;
3165         /* add an offset to driver-specific stats */
3166         for ( ; i < count + xcount; i++)
3167                 xstats[i].id += count;
3168
3169         return count + xcount;
3170 }
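
/*
 * Usage sketch (illustrative; "port" is an assumption): a NULL xstats
 * pointer, or an n smaller than the required count, makes the call
 * return that count, so the usual pattern is count-then-fetch.
 *
 *	int n = rte_eth_xstats_get(port, NULL, 0);
 *
 *	if (n > 0) {
 *		struct rte_eth_xstat *xs = calloc(n, sizeof(*xs));
 *
 *		if (xs != NULL && rte_eth_xstats_get(port, xs, n) == n)
 *			printf("first id %" PRIu64 "\n", xs[0].id);
 *		free(xs);
 *	}
 */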
3171
3172 /* reset ethdev extended statistics */
3173 int
3174 rte_eth_xstats_reset(uint16_t port_id)
3175 {
3176         struct rte_eth_dev *dev;
3177
3178         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3179         dev = &rte_eth_devices[port_id];
3180
3181         /* implemented by the driver */
3182         if (dev->dev_ops->xstats_reset != NULL)
3183                 return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev));
3184
3185         /* fallback to default */
3186         return rte_eth_stats_reset(port_id);
3187 }
3188
3189 static int
3190 eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id,
3191                 uint8_t stat_idx, uint8_t is_rx)
3192 {
3193         struct rte_eth_dev *dev;
3194
3195         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3196
3197         dev = &rte_eth_devices[port_id];
3198
3199         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
3200
3201         if (is_rx && (queue_id >= dev->data->nb_rx_queues))
3202                 return -EINVAL;
3203
3204         if (!is_rx && (queue_id >= dev->data->nb_tx_queues))
3205                 return -EINVAL;
3206
3207         if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
3208                 return -EINVAL;
3209
3210         return (*dev->dev_ops->queue_stats_mapping_set)
3211                         (dev, queue_id, stat_idx, is_rx);
3212 }
3213
3214
3215 int
3216 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
3217                 uint8_t stat_idx)
3218 {
3219         return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id,
3220                                                 tx_queue_id,
3221                                                 stat_idx, STAT_QMAP_TX));
3222 }
3223
3224
3225 int
3226 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
3227                 uint8_t stat_idx)
3228 {
3229         return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id,
3230                                                 rx_queue_id,
3231                                                 stat_idx, STAT_QMAP_RX));
3232 }
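
/*
 * Usage sketch (illustrative; "port" is an assumption): map Rx queue 3
 * onto per-queue stats counter 0 so that its packets are accumulated
 * in rte_eth_stats.q_ipackets[0].
 *
 *	rte_eth_dev_set_rx_queue_stats_mapping(port, 3, 0);
 */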
3233
3234 int
3235 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
3236 {
3237         struct rte_eth_dev *dev;
3238
3239         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3240         dev = &rte_eth_devices[port_id];
3241
3242         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
3243         return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
3244                                                         fw_version, fw_size));
3245 }
3246
3247 int
3248 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
3249 {
3250         struct rte_eth_dev *dev;
3251         const struct rte_eth_desc_lim lim = {
3252                 .nb_max = UINT16_MAX,
3253                 .nb_min = 0,
3254                 .nb_align = 1,
3255                 .nb_seg_max = UINT16_MAX,
3256                 .nb_mtu_seg_max = UINT16_MAX,
3257         };
3258         int diag;
3259
3260         /*
3261          * Init dev_info before port_id check since caller does not have
3262          * return status and does not know if get is successful or not.
3263          */
3264         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3265         dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
3266
3267         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3268         dev = &rte_eth_devices[port_id];
3269
3270         dev_info->rx_desc_lim = lim;
3271         dev_info->tx_desc_lim = lim;
3272         dev_info->device = dev->device;
3273         dev_info->min_mtu = RTE_ETHER_MIN_MTU;
3274         dev_info->max_mtu = UINT16_MAX;
3275
3276         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
3277         diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info);
3278         if (diag != 0) {
3279                 /* Cleanup already filled in device information */
3280                 memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3281                 return eth_err(port_id, diag);
3282         }
3283
3284         /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */
3285         dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues,
3286                         RTE_MAX_QUEUES_PER_PORT);
3287         dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues,
3288                         RTE_MAX_QUEUES_PER_PORT);
3289
3290         dev_info->driver_name = dev->device->driver->name;
3291         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
3292         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
3293
3294         dev_info->dev_flags = &dev->data->dev_flags;
3295
3296         return 0;
3297 }
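
/*
 * Usage sketch (illustrative; "port" is an assumption): dev_info is
 * zeroed on entry, so the caller only needs to check the return code.
 *
 *	struct rte_eth_dev_info info;
 *
 *	if (rte_eth_dev_info_get(port, &info) == 0)
 *		printf("driver %s, up to %u Rx queues\n",
 *		       info.driver_name, info.max_rx_queues);
 */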
3298
3299 int
3300 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3301                                  uint32_t *ptypes, int num)
3302 {
3303         int i, j;
3304         struct rte_eth_dev *dev;
3305         const uint32_t *all_ptypes;
3306
3307         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3308         dev = &rte_eth_devices[port_id];
3309         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
3310         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3311
3312         if (!all_ptypes)
3313                 return 0;
3314
3315         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
3316                 if (all_ptypes[i] & ptype_mask) {
3317                         if (j < num)
3318                                 ptypes[j] = all_ptypes[i];
3319                         j++;
3320                 }
3321
3322         return j;
3323 }
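
/*
 * Usage sketch (illustrative; "port" is an assumption): like the
 * xstats API, this is a two-call pattern -- query the count with a
 * NULL array, then fetch the matching ptypes.
 *
 *	int n = rte_eth_dev_get_supported_ptypes(port,
 *			RTE_PTYPE_L3_MASK, NULL, 0);
 *
 *	if (n > 0) {
 *		uint32_t *ptypes = malloc(n * sizeof(*ptypes));
 *
 *		if (ptypes != NULL)
 *			rte_eth_dev_get_supported_ptypes(port,
 *					RTE_PTYPE_L3_MASK, ptypes, n);
 *		free(ptypes);
 *	}
 */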
3324
3325 int
3326 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3327                                  uint32_t *set_ptypes, unsigned int num)
3328 {
3329         const uint32_t valid_ptype_masks[] = {
3330                 RTE_PTYPE_L2_MASK,
3331                 RTE_PTYPE_L3_MASK,
3332                 RTE_PTYPE_L4_MASK,
3333                 RTE_PTYPE_TUNNEL_MASK,
3334                 RTE_PTYPE_INNER_L2_MASK,
3335                 RTE_PTYPE_INNER_L3_MASK,
3336                 RTE_PTYPE_INNER_L4_MASK,
3337         };
3338         const uint32_t *all_ptypes;
3339         struct rte_eth_dev *dev;
3340         uint32_t unused_mask;
3341         unsigned int i, j;
3342         int ret;
3343
3344         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3345         dev = &rte_eth_devices[port_id];
3346
3347         if (num > 0 && set_ptypes == NULL)
3348                 return -EINVAL;
3349
3350         if (*dev->dev_ops->dev_supported_ptypes_get == NULL ||
3351                         *dev->dev_ops->dev_ptypes_set == NULL) {
3352                 ret = 0;
3353                 goto ptype_unknown;
3354         }
3355
3356         if (ptype_mask == 0) {
3357                 ret = (*dev->dev_ops->dev_ptypes_set)(dev,
3358                                 ptype_mask);
3359                 goto ptype_unknown;
3360         }
3361
3362         unused_mask = ptype_mask;
3363         for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) {
3364                 uint32_t mask = ptype_mask & valid_ptype_masks[i];
3365                 if (mask && mask != valid_ptype_masks[i]) {
3366                         ret = -EINVAL;
3367                         goto ptype_unknown;
3368                 }
3369                 unused_mask &= ~valid_ptype_masks[i];
3370         }
3371
3372         if (unused_mask) {
3373                 ret = -EINVAL;
3374                 goto ptype_unknown;
3375         }
3376
3377         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3378         if (all_ptypes == NULL) {
3379                 ret = 0;
3380                 goto ptype_unknown;
3381         }
3382
3383         /*
3384          * Accommodate as many set_ptypes as possible. If the supplied
3385                  * set_ptypes array is insufficient, fill it partially.
3386          */
3387         for (i = 0, j = 0; set_ptypes != NULL &&
3388                                 (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) {
3389                 if (ptype_mask & all_ptypes[i]) {
3390                         if (j + 1 < num) { /* avoid unsigned wrap when num == 0 */
3391                                 set_ptypes[j] = all_ptypes[i];
3392                                 j++;
3393                                 continue;
3394                         }
3395                         break;
3396                 }
3397         }
3398
3399         if (set_ptypes != NULL && j < num)
3400                 set_ptypes[j] = RTE_PTYPE_UNKNOWN;
3401
3402         return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask);
3403
3404 ptype_unknown:
3405         if (num > 0)
3406                 set_ptypes[0] = RTE_PTYPE_UNKNOWN;
3407
3408         return ret;
3409 }
3410
3411 int
3412 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
3413 {
3414         struct rte_eth_dev *dev;
3415
3416         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3417         dev = &rte_eth_devices[port_id];
3418         rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
3419
3420         return 0;
3421 }
3422
3423 int
3424 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
3425 {
3426         struct rte_eth_dev *dev;
3427
3428         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3429
3430         dev = &rte_eth_devices[port_id];
3431         *mtu = dev->data->mtu;
3432         return 0;
3433 }
3434
3435 int
3436 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
3437 {
3438         int ret;
3439         struct rte_eth_dev_info dev_info;
3440         struct rte_eth_dev *dev;
3441
3442         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3443         dev = &rte_eth_devices[port_id];
3444         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
3445
3446         /*
3447          * If the device does not support dev_infos_get, skip the
3448          * min_mtu/max_mtu validation here, since it needs values that
3449          * are populated by rte_eth_dev_info_get(), which in turn
3450          * relies on dev->dev_ops->dev_infos_get.
3451          */
3452         if (*dev->dev_ops->dev_infos_get != NULL) {
3453                 ret = rte_eth_dev_info_get(port_id, &dev_info);
3454                 if (ret != 0)
3455                         return ret;
3456
3457                 if (mtu < dev_info.min_mtu || mtu > dev_info.max_mtu)
3458                         return -EINVAL;
3459         }
3460
3461         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
3462         if (!ret)
3463                 dev->data->mtu = mtu;
3464
3465         return eth_err(port_id, ret);
3466 }
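
/*
 * Usage sketch (illustrative; "port" is an assumption): on success the
 * new MTU is also cached in dev->data->mtu, which is what
 * rte_eth_dev_get_mtu() reports.
 *
 *	uint16_t mtu;
 *
 *	if (rte_eth_dev_set_mtu(port, 9000) == 0 &&
 *	    rte_eth_dev_get_mtu(port, &mtu) == 0)
 *		printf("MTU is now %u\n", mtu);
 */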
3467
3468 int
3469 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
3470 {
3471         struct rte_eth_dev *dev;
3472         int ret;
3473
3474         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3475         dev = &rte_eth_devices[port_id];
3476         if (!(dev->data->dev_conf.rxmode.offloads &
3477               DEV_RX_OFFLOAD_VLAN_FILTER)) {
3478                 RTE_ETHDEV_LOG(ERR, "Port %u: vlan-filtering disabled\n",
3479                         port_id);
3480                 return -ENOSYS;
3481         }
3482
3483         if (vlan_id > 4095) {
3484                 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n",
3485                         port_id, vlan_id);
3486                 return -EINVAL;
3487         }
3488         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
3489
3490         ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
3491         if (ret == 0) {
3492                 struct rte_vlan_filter_conf *vfc;
3493                 int vidx;
3494                 int vbit;
3495
3496                 vfc = &dev->data->vlan_filter_conf;
3497                 vidx = vlan_id / 64;
3498                 vbit = vlan_id % 64;
3499
3500                 if (on)
3501                         vfc->ids[vidx] |= UINT64_C(1) << vbit;
3502                 else
3503                         vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
3504         }
3505
3506         return eth_err(port_id, ret);
3507 }
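
/*
 * Usage sketch (illustrative; "port" is an assumption): note that
 * DEV_RX_OFFLOAD_VLAN_FILTER must already be enabled in
 * rxmode.offloads, otherwise the call fails with -ENOSYS.
 *
 *	if (rte_eth_dev_vlan_filter(port, 100, 1) == 0)
 *		printf("VLAN 100 accepted on port %u\n", port);
 */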
3508
3509 int
3510 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3511                                     int on)
3512 {
3513         struct rte_eth_dev *dev;
3514
3515         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3516         dev = &rte_eth_devices[port_id];
3517         if (rx_queue_id >= dev->data->nb_rx_queues) {
3518                 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id);
3519                 return -EINVAL;
3520         }
3521
3522         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
3523         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
3524
3525         return 0;
3526 }
3527
3528 int
3529 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
3530                                 enum rte_vlan_type vlan_type,
3531                                 uint16_t tpid)
3532 {
3533         struct rte_eth_dev *dev;
3534
3535         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3536         dev = &rte_eth_devices[port_id];
3537         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
3538
3539         return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
3540                                                                tpid));
3541 }
3542
3543 int
3544 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
3545 {
3546         struct rte_eth_dev_info dev_info;
3547         struct rte_eth_dev *dev;
3548         int ret = 0;
3549         int mask = 0;
3550         int cur, org = 0;
3551         uint64_t orig_offloads;
3552         uint64_t dev_offloads;
3553         uint64_t new_offloads;
3554
3555         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3556         dev = &rte_eth_devices[port_id];
3557
3558         /* save original values in case of failure */
3559         orig_offloads = dev->data->dev_conf.rxmode.offloads;
3560         dev_offloads = orig_offloads;
3561
3562         /* check which options the application changed */
3563         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
3564         org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
3565         if (cur != org) {
3566                 if (cur)
3567                         dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
3568                 else
3569                         dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
3570                 mask |= ETH_VLAN_STRIP_MASK;
3571         }
3572
3573         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
3574         org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
3575         if (cur != org) {
3576                 if (cur)
3577                         dev_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3578                 else
3579                         dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
3580                 mask |= ETH_VLAN_FILTER_MASK;
3581         }
3582
3583         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
3584         org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND);
3585         if (cur != org) {
3586                 if (cur)
3587                         dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
3588                 else
3589                         dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
3590                 mask |= ETH_VLAN_EXTEND_MASK;
3591         }
3592
3593         cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD);
3594         org = !!(dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP);
3595         if (cur != org) {
3596                 if (cur)
3597                         dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
3598                 else
3599                         dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
3600                 mask |= ETH_QINQ_STRIP_MASK;
3601         }
3602
3603         /* no change */
3604         if (mask == 0)
3605                 return ret;
3606
3607         ret = rte_eth_dev_info_get(port_id, &dev_info);
3608         if (ret != 0)
3609                 return ret;
3610
3611         /* Rx VLAN offloading must be within its device capabilities */
3612         if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) {
3613                 new_offloads = dev_offloads & ~orig_offloads;
3614                 RTE_ETHDEV_LOG(ERR,
3615                         "Ethdev port_id=%u newly added VLAN offloads "
3616                         "0x%" PRIx64 " must be within Rx offload capabilities "
3617                         "0x%" PRIx64 " in %s()\n",
3618                         port_id, new_offloads, dev_info.rx_offload_capa,
3619                         __func__);
3620                 return -EINVAL;
3621         }
3622
3623         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
3624         dev->data->dev_conf.rxmode.offloads = dev_offloads;
3625         ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
3626         if (ret) {
3627                 /* hit an error, restore the original values */
3628                 dev->data->dev_conf.rxmode.offloads = orig_offloads;
3629         }
3630
3631         return eth_err(port_id, ret);
3632 }
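
/*
 * Usage sketch (illustrative; "port" is an assumption): the offload
 * mask is best handled read-modify-write, so unrelated VLAN offload
 * bits are preserved.
 *
 *	int mask = rte_eth_dev_get_vlan_offload(port);
 *
 *	if (mask >= 0) {
 *		mask |= ETH_VLAN_STRIP_OFFLOAD;
 *		rte_eth_dev_set_vlan_offload(port, mask);
 *	}
 */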
3633
3634 int
3635 rte_eth_dev_get_vlan_offload(uint16_t port_id)
3636 {
3637         struct rte_eth_dev *dev;
3638         uint64_t *dev_offloads;
3639         int ret = 0;
3640
3641         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3642         dev = &rte_eth_devices[port_id];
3643         dev_offloads = &dev->data->dev_conf.rxmode.offloads;
3644
3645         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
3646                 ret |= ETH_VLAN_STRIP_OFFLOAD;
3647
3648         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
3649                 ret |= ETH_VLAN_FILTER_OFFLOAD;
3650
3651         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
3652                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
3653
3654         if (*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
3655                 ret |= ETH_QINQ_STRIP_OFFLOAD;
3656
3657         return ret;
3658 }
3659
3660 int
3661 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
3662 {
3663         struct rte_eth_dev *dev;
3664
3665         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3666         dev = &rte_eth_devices[port_id];
3667         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
3668
3669         return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
3670 }
3671
3672 int
3673 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3674 {
3675         struct rte_eth_dev *dev;
3676
3677         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3678         dev = &rte_eth_devices[port_id];
3679         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
3680         memset(fc_conf, 0, sizeof(*fc_conf));
3681         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
3682 }
3683
3684 int
3685 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3686 {
3687         struct rte_eth_dev *dev;
3688
3689         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3690         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
3691                 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n");
3692                 return -EINVAL;
3693         }
3694
3695         dev = &rte_eth_devices[port_id];
3696         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
3697         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
3698 }
3699
3700 int
3701 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
3702                                    struct rte_eth_pfc_conf *pfc_conf)
3703 {
3704         struct rte_eth_dev *dev;
3705
3706         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3707         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
3708                 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
3709                 return -EINVAL;
3710         }
3711
3712         dev = &rte_eth_devices[port_id];
3713         /* High water and low water validation is device-specific */
3714         if (*dev->dev_ops->priority_flow_ctrl_set)
3715                 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
3716                                         (dev, pfc_conf));
3717         return -ENOTSUP;
3718 }
3719
3720 static int
3721 eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
3722                         uint16_t reta_size)
3723 {
3724         uint16_t i, num;
3725
3726         if (!reta_conf)
3727                 return -EINVAL;
3728
3729         num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
3730         for (i = 0; i < num; i++) {
3731                 if (reta_conf[i].mask)
3732                         return 0;
3733         }
3734
3735         return -EINVAL;
3736 }
3737
3738 static int
3739 eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
3740                          uint16_t reta_size,
3741                          uint16_t max_rxq)
3742 {
3743         uint16_t i, idx, shift;
3744
3745         if (!reta_conf)
3746                 return -EINVAL;
3747
3748         if (max_rxq == 0) {
3749                 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n");
3750                 return -EINVAL;
3751         }
3752
3753         for (i = 0; i < reta_size; i++) {
3754                 idx = i / RTE_RETA_GROUP_SIZE;
3755                 shift = i % RTE_RETA_GROUP_SIZE;
3756                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
3757                         (reta_conf[idx].reta[shift] >= max_rxq)) {
3758                         RTE_ETHDEV_LOG(ERR,
3759                                 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n",
3760                                 idx, shift,
3761                                 reta_conf[idx].reta[shift], max_rxq);
3762                         return -EINVAL;
3763                 }
3764         }
3765
3766         return 0;
3767 }
3768
3769 int
3770 rte_eth_dev_rss_reta_update(uint16_t port_id,
3771                             struct rte_eth_rss_reta_entry64 *reta_conf,
3772                             uint16_t reta_size)
3773 {
3774         struct rte_eth_dev *dev;
3775         int ret;
3776
3777         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3778         /* Check mask bits */
3779         ret = eth_check_reta_mask(reta_conf, reta_size);
3780         if (ret < 0)
3781                 return ret;
3782
3783         dev = &rte_eth_devices[port_id];
3784
3785         /* Check entry value */
3786         ret = eth_check_reta_entry(reta_conf, reta_size,
3787                                 dev->data->nb_rx_queues);
3788         if (ret < 0)
3789                 return ret;
3790
3791         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
3792         return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
3793                                                              reta_size));
3794 }
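
/*
 * Usage sketch (illustrative; "port" and "nb_rxq" are assumptions):
 * spread a 128-entry redirection table round-robin over the Rx
 * queues. Each rte_eth_rss_reta_entry64 covers RTE_RETA_GROUP_SIZE
 * (64) entries, and only entries with the mask bit set are updated.
 *
 *	struct rte_eth_rss_reta_entry64 reta_conf[2];
 *	uint16_t i, idx, shift;
 *
 *	memset(reta_conf, 0, sizeof(reta_conf));
 *	for (i = 0; i < 128; i++) {
 *		idx = i / RTE_RETA_GROUP_SIZE;
 *		shift = i % RTE_RETA_GROUP_SIZE;
 *		reta_conf[idx].mask |= 1ULL << shift;
 *		reta_conf[idx].reta[shift] = i % nb_rxq;
 *	}
 *	rte_eth_dev_rss_reta_update(port, reta_conf, 128);
 */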
3795
3796 int
3797 rte_eth_dev_rss_reta_query(uint16_t port_id,
3798                            struct rte_eth_rss_reta_entry64 *reta_conf,
3799                            uint16_t reta_size)
3800 {
3801         struct rte_eth_dev *dev;
3802         int ret;
3803
3804         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3805
3806         /* Check mask bits */
3807         ret = eth_check_reta_mask(reta_conf, reta_size);
3808         if (ret < 0)
3809                 return ret;
3810
3811         dev = &rte_eth_devices[port_id];
3812         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
3813         return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
3814                                                             reta_size));
3815 }
3816
3817 int
3818 rte_eth_dev_rss_hash_update(uint16_t port_id,
3819                             struct rte_eth_rss_conf *rss_conf)
3820 {
3821         struct rte_eth_dev *dev;
3822         struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
3823         int ret;
3824
3825         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3826
3827         ret = rte_eth_dev_info_get(port_id, &dev_info);
3828         if (ret != 0)
3829                 return ret;
3830
3831         rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf);
3832
3833         dev = &rte_eth_devices[port_id];
3834         if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
3835             dev_info.flow_type_rss_offloads) {
3836                 RTE_ETHDEV_LOG(ERR,
3837                         "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
3838                         port_id, rss_conf->rss_hf,
3839                         dev_info.flow_type_rss_offloads);
3840                 return -EINVAL;
3841         }
3842         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
3843         return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
3844                                                                  rss_conf));
3845 }
3846
3847 int
3848 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
3849                               struct rte_eth_rss_conf *rss_conf)
3850 {
3851         struct rte_eth_dev *dev;
3852
3853         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3854         dev = &rte_eth_devices[port_id];
3855         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
3856         return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
3857                                                                    rss_conf));
3858 }
3859
3860 int
3861 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
3862                                 struct rte_eth_udp_tunnel *udp_tunnel)
3863 {
3864         struct rte_eth_dev *dev;
3865
3866         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3867         if (udp_tunnel == NULL) {
3868                 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
3869                 return -EINVAL;
3870         }
3871
3872         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
3873                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
3874                 return -EINVAL;
3875         }
3876
3877         dev = &rte_eth_devices[port_id];
3878         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
3879         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
3880                                                                 udp_tunnel));
3881 }
3882
3883 int
3884 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
3885                                    struct rte_eth_udp_tunnel *udp_tunnel)
3886 {
3887         struct rte_eth_dev *dev;
3888
3889         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3890         dev = &rte_eth_devices[port_id];
3891
3892         if (udp_tunnel == NULL) {
3893                 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
3894                 return -EINVAL;
3895         }
3896
3897         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
3898                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
3899                 return -EINVAL;
3900         }
3901
3902         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
3903         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
3904                                                                 udp_tunnel));
3905 }
3906
3907 int
3908 rte_eth_led_on(uint16_t port_id)
3909 {
3910         struct rte_eth_dev *dev;
3911
3912         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3913         dev = &rte_eth_devices[port_id];
3914         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
3915         return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
3916 }
3917
3918 int
3919 rte_eth_led_off(uint16_t port_id)
3920 {
3921         struct rte_eth_dev *dev;
3922
3923         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3924         dev = &rte_eth_devices[port_id];
3925         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
3926         return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
3927 }
3928
3929 int
3930 rte_eth_fec_get_capability(uint16_t port_id,
3931                            struct rte_eth_fec_capa *speed_fec_capa,
3932                            unsigned int num)
3933 {
3934         struct rte_eth_dev *dev;
3935         int ret;
3936
3937         if (speed_fec_capa == NULL && num > 0)
3938                 return -EINVAL;
3939
3940         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3941         dev = &rte_eth_devices[port_id];
3942         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get_capability, -ENOTSUP);
3943         ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num);
3944
3945         return ret;
3946 }
3947
3948 int
3949 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa)
3950 {
3951         struct rte_eth_dev *dev;
3952
3953         if (fec_capa == NULL)
3954                 return -EINVAL;
3955
3956         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3957         dev = &rte_eth_devices[port_id];
3958         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get, -ENOTSUP);
3959         return eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa));
3960 }
3961
3962 int
3963 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa)
3964 {
3965         struct rte_eth_dev *dev;
3966
3967         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3968         dev = &rte_eth_devices[port_id];
3969         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_set, -ENOTSUP);
3970         return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa));
3971 }
3972
3973 /*
3974  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
3975  * an empty spot.
3976  */
3977 static int
3978 eth_dev_get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
3979 {
3980         struct rte_eth_dev_info dev_info;
3981         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3982         unsigned i;
3983         int ret;
3984
3985         ret = rte_eth_dev_info_get(port_id, &dev_info);
3986         if (ret != 0)
3987                 return -1;
3988
3989         for (i = 0; i < dev_info.max_mac_addrs; i++)
3990                 if (memcmp(addr, &dev->data->mac_addrs[i],
3991                                 RTE_ETHER_ADDR_LEN) == 0)
3992                         return i;
3993
3994         return -1;
3995 }
3996
3997 static const struct rte_ether_addr null_mac_addr;
3998
3999 int
4000 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
4001                         uint32_t pool)
4002 {
4003         struct rte_eth_dev *dev;
4004         int index;
4005         uint64_t pool_mask;
4006         int ret;
4007
4008         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4009         dev = &rte_eth_devices[port_id];
4010         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
4011
4012         if (rte_is_zero_ether_addr(addr)) {
4013                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
4014                         port_id);
4015                 return -EINVAL;
4016         }
4017         if (pool >= ETH_64_POOLS) {
4018                 RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", ETH_64_POOLS - 1);
4019                 return -EINVAL;
4020         }
4021
4022         index = eth_dev_get_mac_addr_index(port_id, addr);
4023         if (index < 0) {
4024                 index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr);
4025                 if (index < 0) {
4026                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
4027                                 port_id);
4028                         return -ENOSPC;
4029                 }
4030         } else {
4031                 pool_mask = dev->data->mac_pool_sel[index];
4032
4033                 /* If both the MAC address and pool are already there, do nothing */
4034                 if (pool_mask & (1ULL << pool))
4035                         return 0;
4036         }
4037
4038         /* Update NIC */
4039         ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
4040
4041         if (ret == 0) {
4042                 /* Update address in NIC data structure */
4043                 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]);
4044
4045                 /* Update pool bitmap in NIC data structure */
4046                 dev->data->mac_pool_sel[index] |= (1ULL << pool);
4047         }
4048
4049         return eth_err(port_id, ret);
4050 }
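
/*
 * Usage sketch (illustrative; "port" is an assumption): add a locally
 * administered secondary MAC address to VMDq pool 0. Re-adding the
 * same address/pool pair is a no-op.
 *
 *	struct rte_ether_addr addr = {
 *		.addr_bytes = {0x02, 0x00, 0x00, 0x00, 0x00, 0x01} };
 *
 *	rte_eth_dev_mac_addr_add(port, &addr, 0);
 */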
4051
4052 int
4053 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr)
4054 {
4055         struct rte_eth_dev *dev;
4056         int index;
4057
4058         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4059         dev = &rte_eth_devices[port_id];
4060         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
4061
4062         index = eth_dev_get_mac_addr_index(port_id, addr);
4063         if (index == 0) {
4064                 RTE_ETHDEV_LOG(ERR,
4065                         "Port %u: Cannot remove default MAC address\n",
4066                         port_id);
4067                 return -EADDRINUSE;
4068         } else if (index < 0)
4069                 return 0;  /* Do nothing if address wasn't found */
4070
4071         /* Update NIC */
4072         (*dev->dev_ops->mac_addr_remove)(dev, index);
4073
4074         /* Update address in NIC data structure */
4075         rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
4076
4077         /* reset pool bitmap */
4078         dev->data->mac_pool_sel[index] = 0;
4079
4080         return 0;
4081 }
4082
4083 int
4084 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr)
4085 {
4086         struct rte_eth_dev *dev;
4087         int ret;
4088
4089         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4090
4091         if (!rte_is_valid_assigned_ether_addr(addr))
4092                 return -EINVAL;
4093
4094         dev = &rte_eth_devices[port_id];
4095         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
4096
4097         ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
4098         if (ret < 0)
4099                 return ret;
4100
4101         /* Update default address in NIC data structure */
4102         rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
4103
4104         return 0;
4105 }
4106
4107
4108 /*
4109  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
4110  * an empty spot.
4111  */
4112 static int
4113 eth_dev_get_hash_mac_addr_index(uint16_t port_id,
4114                 const struct rte_ether_addr *addr)
4115 {
4116         struct rte_eth_dev_info dev_info;
4117         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4118         unsigned i;
4119         int ret;
4120
4121         ret = rte_eth_dev_info_get(port_id, &dev_info);
4122         if (ret != 0)
4123                 return -1;
4124
4125         if (!dev->data->hash_mac_addrs)
4126                 return -1;
4127
4128         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
4129                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
4130                         RTE_ETHER_ADDR_LEN) == 0)
4131                         return i;
4132
4133         return -1;
4134 }
4135
4136 int
4137 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
4138                                 uint8_t on)
4139 {
4140         int index;
4141         int ret;
4142         struct rte_eth_dev *dev;
4143
4144         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4145
4146         dev = &rte_eth_devices[port_id];
4147         if (rte_is_zero_ether_addr(addr)) {
4148                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
4149                         port_id);
4150                 return -EINVAL;
4151         }
4152
4153         index = eth_dev_get_hash_mac_addr_index(port_id, addr);
4154         /* Check if it's already there, and do nothing */
4155         if ((index >= 0) && on)
4156                 return 0;
4157
4158         if (index < 0) {
4159                 if (!on) {
4160                         RTE_ETHDEV_LOG(ERR,
4161                                 "Port %u: the MAC address was not set in UTA\n",
4162                                 port_id);
4163                         return -EINVAL;
4164                 }
4165
4166                 index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr);
4167                 if (index < 0) {
4168                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
4169                                 port_id);
4170                         return -ENOSPC;
4171                 }
4172         }
4173
4174         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
4175         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
4176         if (ret == 0) {
4177                 /* Update address in NIC data structure */
4178                 if (on)
4179                         rte_ether_addr_copy(addr,
4180                                         &dev->data->hash_mac_addrs[index]);
4181                 else
4182                         rte_ether_addr_copy(&null_mac_addr,
4183                                         &dev->data->hash_mac_addrs[index]);
4184         }
4185
4186         return eth_err(port_id, ret);
4187 }
4188
4189 int
4190 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
4191 {
4192         struct rte_eth_dev *dev;
4193
4194         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4195
4196         dev = &rte_eth_devices[port_id];
4197
4198         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
4199         return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
4200                                                                        on));
4201 }
4202
4203 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
4204                                         uint16_t tx_rate)
4205 {
4206         struct rte_eth_dev *dev;
4207         struct rte_eth_dev_info dev_info;
4208         struct rte_eth_link link;
4209         int ret;
4210
4211         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4212
4213         ret = rte_eth_dev_info_get(port_id, &dev_info);
4214         if (ret != 0)
4215                 return ret;
4216
4217         dev = &rte_eth_devices[port_id];
4218         link = dev->data->dev_link;
4219
4220         if (queue_idx >= dev_info.max_tx_queues) {
4221                 RTE_ETHDEV_LOG(ERR,
4222                         "Set queue rate limit: port %u: invalid queue id=%u\n",
4223                         port_id, queue_idx);
4224                 return -EINVAL;
4225         }
4226
4227         if (tx_rate > link.link_speed) {
4228                 RTE_ETHDEV_LOG(ERR,
4229                         "Set queue rate limit: invalid tx_rate=%u, bigger than link speed=%u\n",
4230                         tx_rate, link.link_speed);
4231                 return -EINVAL;
4232         }
4233
4234         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
4235         return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
4236                                                         queue_idx, tx_rate));
4237 }
4238
4239 int
4240 rte_eth_mirror_rule_set(uint16_t port_id,
4241                         struct rte_eth_mirror_conf *mirror_conf,
4242                         uint8_t rule_id, uint8_t on)
4243 {
4244         struct rte_eth_dev *dev;
4245
4246         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4247         if (mirror_conf->rule_type == 0) {
4248                 RTE_ETHDEV_LOG(ERR, "Mirror rule type cannot be 0\n");
4249                 return -EINVAL;
4250         }
4251
4252         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
4253                 RTE_ETHDEV_LOG(ERR, "Invalid dst pool, pool id must be 0-%d\n",
4254                         ETH_64_POOLS - 1);
4255                 return -EINVAL;
4256         }
4257
4258         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
4259              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
4260             (mirror_conf->pool_mask == 0)) {
4261                 RTE_ETHDEV_LOG(ERR,
4262                         "Invalid mirror pool, pool mask cannot be 0\n");
4263                 return -EINVAL;
4264         }
4265
4266         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
4267             mirror_conf->vlan.vlan_mask == 0) {
4268                 RTE_ETHDEV_LOG(ERR,
4269                         "Invalid vlan mask, vlan mask cannot be 0\n");
4270                 return -EINVAL;
4271         }
4272
4273         dev = &rte_eth_devices[port_id];
4274         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
4275
4276         return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
4277                                                 mirror_conf, rule_id, on));
4278 }
4279
4280 int
4281 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
4282 {
4283         struct rte_eth_dev *dev;
4284
4285         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4286
4287         dev = &rte_eth_devices[port_id];
4288         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
4289
4290         return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev,
4291                                                                    rule_id));
4292 }
4293
4294 RTE_INIT(eth_dev_init_cb_lists)
4295 {
4296         int i;
4297
4298         for (i = 0; i < RTE_MAX_ETHPORTS; i++)
4299                 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
4300 }
4301
4302 int
4303 rte_eth_dev_callback_register(uint16_t port_id,
4304                         enum rte_eth_event_type event,
4305                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4306 {
4307         struct rte_eth_dev *dev;
4308         struct rte_eth_dev_callback *user_cb;
4309         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
4310         uint16_t last_port;
4311
4312         if (!cb_fn)
4313                 return -EINVAL;
4314
4315         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4316                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id);
4317                 return -EINVAL;
4318         }
4319
4320         if (port_id == RTE_ETH_ALL) {
4321                 next_port = 0;
4322                 last_port = RTE_MAX_ETHPORTS - 1;
4323         } else {
4324                 next_port = last_port = port_id;
4325         }
4326
4327         rte_spinlock_lock(&eth_dev_cb_lock);
4328
4329         do {
4330                 dev = &rte_eth_devices[next_port];
4331
4332                 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
4333                         if (user_cb->cb_fn == cb_fn &&
4334                                 user_cb->cb_arg == cb_arg &&
4335                                 user_cb->event == event) {
4336                                 break;
4337                         }
4338                 }
4339
4340                 /* create a new callback. */
4341                 if (user_cb == NULL) {
4342                         user_cb = rte_zmalloc("INTR_USER_CALLBACK",
4343                                 sizeof(struct rte_eth_dev_callback), 0);
4344                         if (user_cb != NULL) {
4345                                 user_cb->cb_fn = cb_fn;
4346                                 user_cb->cb_arg = cb_arg;
4347                                 user_cb->event = event;
4348                                 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
4349                                                   user_cb, next);
4350                         } else {
4351                                 rte_spinlock_unlock(&eth_dev_cb_lock);
4352                                 rte_eth_dev_callback_unregister(port_id, event,
4353                                                                 cb_fn, cb_arg);
4354                                 return -ENOMEM;
4355                         }
4356
4357                 }
4358         } while (++next_port <= last_port);
4359
4360         rte_spinlock_unlock(&eth_dev_cb_lock);
4361         return 0;
4362 }
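
/*
 * Usage sketch (illustrative; "port" and "link_cb" are assumptions):
 * a callback matching rte_eth_dev_cb_fn, registered from the
 * application's init path for link status change events.
 *
 *	static int
 *	link_cb(uint16_t pid, enum rte_eth_event_type ev,
 *		void *cb_arg, void *ret_param)
 *	{
 *		RTE_SET_USED(cb_arg);
 *		RTE_SET_USED(ret_param);
 *		printf("port %u: event %d\n", pid, ev);
 *		return 0;
 *	}
 *
 *	rte_eth_dev_callback_register(port, RTE_ETH_EVENT_INTR_LSC,
 *				      link_cb, NULL);
 */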
4363
4364 int
4365 rte_eth_dev_callback_unregister(uint16_t port_id,
4366                         enum rte_eth_event_type event,
4367                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4368 {
4369         int ret;
4370         struct rte_eth_dev *dev;
4371         struct rte_eth_dev_callback *cb, *next;
4372         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
4373         uint16_t last_port;
4374
4375         if (!cb_fn)
4376                 return -EINVAL;
4377
4378         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4379                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id);
4380                 return -EINVAL;
4381         }
4382
4383         if (port_id == RTE_ETH_ALL) {
4384                 next_port = 0;
4385                 last_port = RTE_MAX_ETHPORTS - 1;
4386         } else {
4387                 next_port = last_port = port_id;
4388         }
4389
4390         rte_spinlock_lock(&eth_dev_cb_lock);
4391
4392         do {
4393                 dev = &rte_eth_devices[next_port];
4394                 ret = 0;
4395                 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
4396                      cb = next) {
4397
4398                         next = TAILQ_NEXT(cb, next);
4399
4400                         if (cb->cb_fn != cb_fn || cb->event != event ||
4401                             (cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
4402                                 continue;
4403
4404                         /*
4405                          * if this callback is not executing right now,
4406                          * then remove it.
4407                          */
4408                         if (cb->active == 0) {
4409                                 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
4410                                 rte_free(cb);
4411                         } else {
4412                                 ret = -EAGAIN;
4413                         }
4414                 }
4415         } while (++next_port <= last_port);
4416
4417         rte_spinlock_unlock(&eth_dev_cb_lock);
4418         return ret;
4419 }
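
/*
 * Usage sketch (illustrative): per the wildcard match above, passing
 * (void *)-1 as cb_arg unregisters the callback regardless of the cb_arg
 * value it was registered with.
 *
 *      rte_eth_dev_callback_unregister(RTE_ETH_ALL,
 *                      RTE_ETH_EVENT_INTR_LSC, app_link_event_cb, (void *)-1);
 */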
4420
4421 int
4422 rte_eth_dev_callback_process(struct rte_eth_dev *dev,
4423         enum rte_eth_event_type event, void *ret_param)
4424 {
4425         struct rte_eth_dev_callback *cb_lst;
4426         struct rte_eth_dev_callback dev_cb;
4427         int rc = 0;
4428
4429         rte_spinlock_lock(&eth_dev_cb_lock);
4430         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
4431                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
4432                         continue;
4433                 dev_cb = *cb_lst;
4434                 cb_lst->active = 1;
4435                 if (ret_param != NULL)
4436                         dev_cb.ret_param = ret_param;
4437
4438                 rte_spinlock_unlock(&eth_dev_cb_lock);
4439                 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
4440                                 dev_cb.cb_arg, dev_cb.ret_param);
4441                 rte_spinlock_lock(&eth_dev_cb_lock);
4442                 cb_lst->active = 0;
4443         }
4444         rte_spinlock_unlock(&eth_dev_cb_lock);
4445         return rc;
4446 }
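
/*
 * Driver-side sketch (illustrative): a PMD typically forwards a link-state
 * interrupt to the application callbacks like this; the handler name is
 * hypothetical.
 *
 *      static void
 *      pmd_lsc_intr_handler(void *param)
 *      {
 *              struct rte_eth_dev *dev = param;
 *
 *              rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
 *      }
 */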
4447
4448 void
4449 rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
4450 {
4451         if (dev == NULL)
4452                 return;
4453
4454         rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);
4455
4456         dev->state = RTE_ETH_DEV_ATTACHED;
4457 }
4458
4459 int
4460 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
4461 {
4462         uint32_t vec;
4463         struct rte_eth_dev *dev;
4464         struct rte_intr_handle *intr_handle;
4465         uint16_t qid;
4466         int rc;
4467
4468         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4469
4470         dev = &rte_eth_devices[port_id];
4471
4472         if (!dev->intr_handle) {
4473                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4474                 return -ENOTSUP;
4475         }
4476
4477         intr_handle = dev->intr_handle;
4478         if (!intr_handle->intr_vec) {
4479                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4480                 return -EPERM;
4481         }
4482
4483         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
4484                 vec = intr_handle->intr_vec[qid];
4485                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4486                 if (rc && rc != -EEXIST) {
4487                         RTE_ETHDEV_LOG(ERR,
4488                                 "p %u q %u rx ctl error op %d epfd %d vec %u\n",
4489                                 port_id, qid, op, epfd, vec);
4490                 }
4491         }
4492
4493         return 0;
4494 }
4495
4496 int
4497 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
4498 {
4499         struct rte_intr_handle *intr_handle;
4500         struct rte_eth_dev *dev;
4501         unsigned int efd_idx;
4502         uint32_t vec;
4503         int fd;
4504
4505         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
4506
4507         dev = &rte_eth_devices[port_id];
4508
4509         if (queue_id >= dev->data->nb_rx_queues) {
4510                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4511                 return -1;
4512         }
4513
4514         if (!dev->intr_handle) {
4515                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4516                 return -1;
4517         }
4518
4519         intr_handle = dev->intr_handle;
4520         if (!intr_handle->intr_vec) {
4521                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4522                 return -1;
4523         }
4524
4525         vec = intr_handle->intr_vec[queue_id];
4526         efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
4527                 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
4528         fd = intr_handle->efds[efd_idx];
4529
4530         return fd;
4531 }
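
/*
 * Usage sketch (illustrative): feeding the per-queue interrupt fd into a
 * caller-managed epoll set (sys/epoll.h). Assumes Rx interrupts were
 * requested at configure time (intr_conf.rxq = 1); app_epfd is a
 * hypothetical application epoll instance.
 *
 *      int fd = rte_eth_dev_rx_intr_ctl_q_get_fd(port_id, queue_id);
 *      struct epoll_event ev = { .events = EPOLLIN };
 *
 *      if (fd >= 0) {
 *              ev.data.fd = fd;
 *              epoll_ctl(app_epfd, EPOLL_CTL_ADD, fd, &ev);
 *      }
 */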
4532
4533 static inline int
4534 eth_dev_dma_mzone_name(char *name, size_t len, uint16_t port_id, uint16_t queue_id,
4535                 const char *ring_name)
4536 {
4537         return snprintf(name, len, "eth_p%d_q%d_%s",
4538                         port_id, queue_id, ring_name);
4539 }
4540
4541 const struct rte_memzone *
4542 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
4543                          uint16_t queue_id, size_t size, unsigned align,
4544                          int socket_id)
4545 {
4546         char z_name[RTE_MEMZONE_NAMESIZE];
4547         const struct rte_memzone *mz;
4548         int rc;
4549
4550         rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
4551                         queue_id, ring_name);
4552         if (rc >= RTE_MEMZONE_NAMESIZE) {
4553                 RTE_ETHDEV_LOG(ERR, "ring name too long\n");
4554                 rte_errno = ENAMETOOLONG;
4555                 return NULL;
4556         }
4557
4558         mz = rte_memzone_lookup(z_name);
4559         if (mz) {
4560                 if ((socket_id != SOCKET_ID_ANY && socket_id != mz->socket_id) ||
4561                                 size > mz->len ||
4562                                 ((uintptr_t)mz->addr & (align - 1)) != 0) {
4563                         RTE_ETHDEV_LOG(ERR,
4564                                 "memzone %s does not satisfy the requested attributes\n",
4565                                 mz->name);
4566                         return NULL;
4567                 }
4568
4569                 return mz;
4570         }
4571
4572         return rte_memzone_reserve_aligned(z_name, size, socket_id,
4573                         RTE_MEMZONE_IOVA_CONTIG, align);
4574 }
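
/*
 * Driver-side sketch (illustrative): a PMD reserving IOVA-contiguous ring
 * memory during queue setup; queue_idx, ring_size and socket_id are
 * hypothetical driver locals.
 *
 *      const struct rte_memzone *mz;
 *
 *      mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
 *                      ring_size, RTE_CACHE_LINE_SIZE, socket_id);
 *      if (mz == NULL)
 *              return -ENOMEM;
 */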
4575
4576 int
4577 rte_eth_dma_zone_free(const struct rte_eth_dev *dev, const char *ring_name,
4578                 uint16_t queue_id)
4579 {
4580         char z_name[RTE_MEMZONE_NAMESIZE];
4581         const struct rte_memzone *mz;
4582         int rc = 0;
4583
4584         rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
4585                         queue_id, ring_name);
4586         if (rc >= RTE_MEMZONE_NAMESIZE) {
4587                 RTE_ETHDEV_LOG(ERR, "ring name too long\n");
4588                 return -ENAMETOOLONG;
4589         }
4590
4591         mz = rte_memzone_lookup(z_name);
4592         if (mz)
4593                 rc = rte_memzone_free(mz);
4594         else
4595                 rc = -ENOENT;
4596
4597         return rc;
4598 }
4599
4600 int
4601 rte_eth_dev_create(struct rte_device *device, const char *name,
4602         size_t priv_data_size,
4603         ethdev_bus_specific_init ethdev_bus_specific_init,
4604         void *bus_init_params,
4605         ethdev_init_t ethdev_init, void *init_params)
4606 {
4607         struct rte_eth_dev *ethdev;
4608         int retval;
4609
4610         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL);
4611
4612         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
4613                 ethdev = rte_eth_dev_allocate(name);
4614                 if (!ethdev)
4615                         return -ENODEV;
4616
4617                 if (priv_data_size) {
4618                         ethdev->data->dev_private = rte_zmalloc_socket(
4619                                 name, priv_data_size, RTE_CACHE_LINE_SIZE,
4620                                 device->numa_node);
4621
4622                         if (!ethdev->data->dev_private) {
4623                                 RTE_ETHDEV_LOG(ERR,
4624                                         "failed to allocate private data\n");
4625                                 retval = -ENOMEM;
4626                                 goto probe_failed;
4627                         }
4628                 }
4629         } else {
4630                 ethdev = rte_eth_dev_attach_secondary(name);
4631                 if (!ethdev) {
4632                         RTE_ETHDEV_LOG(ERR,
4633                                 "secondary process attach failed, ethdev doesn't exist\n");
4634                         return -ENODEV;
4635                 }
4636         }
4637
4638         ethdev->device = device;
4639
4640         if (ethdev_bus_specific_init) {
4641                 retval = ethdev_bus_specific_init(ethdev, bus_init_params);
4642                 if (retval) {
4643                         RTE_ETHDEV_LOG(ERR,
4644                                 "ethdev bus specific initialisation failed\n");
4645                         goto probe_failed;
4646                 }
4647         }
4648
4649         retval = ethdev_init(ethdev, init_params);
4650         if (retval) {
4651                 RTE_ETHDEV_LOG(ERR, "ethdev initialisation failed\n");
4652                 goto probe_failed;
4653         }
4654
4655         rte_eth_dev_probing_finish(ethdev);
4656
4657         return retval;
4658
4659 probe_failed:
4660         rte_eth_dev_release_port(ethdev);
4661         return retval;
4662 }
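
/*
 * Driver-side sketch (illustrative): a PCI probe callback creating an
 * ethdev with private data and a device-specific init function; the pmd_*
 * names are hypothetical.
 *
 *      static int
 *      pmd_probe(struct rte_pci_driver *drv __rte_unused,
 *                      struct rte_pci_device *pci_dev)
 *      {
 *              return rte_eth_dev_create(&pci_dev->device,
 *                              pci_dev->device.name,
 *                              sizeof(struct pmd_private),
 *                              NULL, NULL,
 *                              pmd_ethdev_init, NULL);
 *      }
 */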
4663
4664 int
4665 rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
4666         ethdev_uninit_t ethdev_uninit)
4667 {
4668         int ret;
4669
4670         ethdev = rte_eth_dev_allocated(ethdev->data->name);
4671         if (!ethdev)
4672                 return -ENODEV;
4673
4674         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL);
4675
4676         ret = ethdev_uninit(ethdev);
4677         if (ret)
4678                 return ret;
4679
4680         return rte_eth_dev_release_port(ethdev);
4681 }
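
/*
 * Driver-side sketch (illustrative): the matching remove path, mirroring
 * the probe sketch above; pmd_ethdev_uninit is hypothetical.
 *
 *      static int
 *      pmd_remove(struct rte_pci_device *pci_dev)
 *      {
 *              struct rte_eth_dev *ethdev;
 *
 *              ethdev = rte_eth_dev_allocated(pci_dev->device.name);
 *              if (ethdev == NULL)
 *                      return 0;
 *              return rte_eth_dev_destroy(ethdev, pmd_ethdev_uninit);
 *      }
 */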
4682
4683 int
4684 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
4685                           int epfd, int op, void *data)
4686 {
4687         uint32_t vec;
4688         struct rte_eth_dev *dev;
4689         struct rte_intr_handle *intr_handle;
4690         int rc;
4691
4692         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4693
4694         dev = &rte_eth_devices[port_id];
4695         if (queue_id >= dev->data->nb_rx_queues) {
4696                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4697                 return -EINVAL;
4698         }
4699
4700         if (!dev->intr_handle) {
4701                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4702                 return -ENOTSUP;
4703         }
4704
4705         intr_handle = dev->intr_handle;
4706         if (!intr_handle->intr_vec) {
4707                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4708                 return -EPERM;
4709         }
4710
4711         vec = intr_handle->intr_vec[queue_id];
4712         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4713         if (rc && rc != -EEXIST) {
4714                 RTE_ETHDEV_LOG(ERR,
4715                         "p %u q %u rx ctl error op %d epfd %d vec %u\n",
4716                         port_id, queue_id, op, epfd, vec);
4717                 return rc;
4718         }
4719
4720         return 0;
4721 }
4722
4723 int
4724 rte_eth_dev_rx_intr_enable(uint16_t port_id,
4725                            uint16_t queue_id)
4726 {
4727         struct rte_eth_dev *dev;
4728         int ret;
4729
4730         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4731
4732         dev = &rte_eth_devices[port_id];
4733
4734         ret = eth_dev_validate_rx_queue(dev, queue_id);
4735         if (ret != 0)
4736                 return ret;
4737
4738         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
4739         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev,
4740                                                                 queue_id));
4741 }
4742
4743 int
4744 rte_eth_dev_rx_intr_disable(uint16_t port_id,
4745                             uint16_t queue_id)
4746 {
4747         struct rte_eth_dev *dev;
4748         int ret;
4749
4750         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4751
4752         dev = &rte_eth_devices[port_id];
4753
4754         ret = eth_dev_validate_rx_queue(dev, queue_id);
4755         if (ret != 0)
4756                 return ret;
4757
4758         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
4759         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev,
4760                                                                 queue_id));
4761 }
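
/*
 * Usage sketch (illustrative): the interrupt-driven Rx pattern used by
 * examples such as l3fwd-power: arm the queue interrupt only when the
 * queue runs dry, sleep in rte_epoll_wait(), then return to polling.
 * pkts and MAX_BURST are hypothetical application state.
 *
 *      struct rte_epoll_event ev;
 *
 *      rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
 *                      RTE_INTR_EVENT_ADD, NULL);
 *      while (rte_eth_rx_burst(port_id, queue_id, pkts, MAX_BURST) == 0) {
 *              rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *              rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
 *              rte_eth_dev_rx_intr_disable(port_id, queue_id);
 *      }
 */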
4762
4763
4764 const struct rte_eth_rxtx_callback *
4765 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
4766                 rte_rx_callback_fn fn, void *user_param)
4767 {
4768 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4769         rte_errno = ENOTSUP;
4770         return NULL;
4771 #endif
4772         struct rte_eth_dev *dev;
4773
4774         /* check input parameters */
4775         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4776                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4777                 rte_errno = EINVAL;
4778                 return NULL;
4779         }
4780         dev = &rte_eth_devices[port_id];
4781         if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
4782                 rte_errno = EINVAL;
4783                 return NULL;
4784         }
4785         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4786
4787         if (cb == NULL) {
4788                 rte_errno = ENOMEM;
4789                 return NULL;
4790         }
4791
4792         cb->fn.rx = fn;
4793         cb->param = user_param;
4794
4795         rte_spinlock_lock(&eth_dev_rx_cb_lock);
4796         /* Add the callback in FIFO order. */
4797         struct rte_eth_rxtx_callback *tail =
4798                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
4799
4800         if (!tail) {
4801                 /* Stores to cb->fn and cb->param should complete before
4802                  * cb is visible to data plane.
4803                  */
4804                 __atomic_store_n(
4805                         &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
4806                         cb, __ATOMIC_RELEASE);
4807
4808         } else {
4809                 while (tail->next)
4810                         tail = tail->next;
4811                 /* Stores to cb->fn and cb->param should complete before
4812                  * cb is visible to data plane.
4813                  */
4814                 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
4815         }
4816         rte_spinlock_unlock(&eth_dev_rx_cb_lock);
4817
4818         return cb;
4819 }
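
/*
 * Usage sketch (illustrative): a post-Rx callback counting received
 * packets; the counter is hypothetical application state.
 *
 *      static uint16_t
 *      count_rx_cb(uint16_t port, uint16_t queue, struct rte_mbuf *pkts[],
 *                      uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
 *      {
 *              uint64_t *counter = user_param;
 *
 *              RTE_SET_USED(port);
 *              RTE_SET_USED(queue);
 *              RTE_SET_USED(pkts);
 *              RTE_SET_USED(max_pkts);
 *              *counter += nb_pkts;
 *              return nb_pkts;
 *      }
 *
 *      cb = rte_eth_add_rx_callback(port_id, queue_id, count_rx_cb, &counter);
 */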
4820
4821 const struct rte_eth_rxtx_callback *
4822 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
4823                 rte_rx_callback_fn fn, void *user_param)
4824 {
4825 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4826         rte_errno = ENOTSUP;
4827         return NULL;
4828 #endif
4829         /* check input parameters */
4830         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4831                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4832                 rte_errno = EINVAL;
4833                 return NULL;
4834         }
4835
4836         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4837
4838         if (cb == NULL) {
4839                 rte_errno = ENOMEM;
4840                 return NULL;
4841         }
4842
4843         cb->fn.rx = fn;
4844         cb->param = user_param;
4845
4846         rte_spinlock_lock(&eth_dev_rx_cb_lock);
4847         /* Add the callback at the first position. */
4848         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
4849         /* Stores to cb->fn, cb->param and cb->next should complete before
4850          * cb is visible to data plane threads.
4851          */
4852         __atomic_store_n(
4853                 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
4854                 cb, __ATOMIC_RELEASE);
4855         rte_spinlock_unlock(&eth_dev_rx_cb_lock);
4856
4857         return cb;
4858 }
4859
4860 const struct rte_eth_rxtx_callback *
4861 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
4862                 rte_tx_callback_fn fn, void *user_param)
4863 {
4864 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4865         rte_errno = ENOTSUP;
4866         return NULL;
4867 #endif
4868         struct rte_eth_dev *dev;
4869
4870         /* check input parameters */
4871         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4872                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
4873                 rte_errno = EINVAL;
4874                 return NULL;
4875         }
4876
4877         dev = &rte_eth_devices[port_id];
4878         if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
4879                 rte_errno = EINVAL;
4880                 return NULL;
4881         }
4882
4883         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4884
4885         if (cb == NULL) {
4886                 rte_errno = ENOMEM;
4887                 return NULL;
4888         }
4889
4890         cb->fn.tx = fn;
4891         cb->param = user_param;
4892
4893         rte_spinlock_lock(&eth_dev_tx_cb_lock);
4894         /* Add the callback in FIFO order. */
4895         struct rte_eth_rxtx_callback *tail =
4896                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
4897
4898         if (!tail) {
4899                 /* Stores to cb->fn and cb->param should complete before
4900                  * cb is visible to data plane.
4901                  */
4902                 __atomic_store_n(
4903                         &rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id],
4904                         cb, __ATOMIC_RELEASE);
4905
4906         } else {
4907                 while (tail->next)
4908                         tail = tail->next;
4909                 /* Stores to cb->fn and cb->param should complete before
4910                  * cb is visible to data plane.
4911                  */
4912                 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
4913         }
4914         rte_spinlock_unlock(&eth_dev_tx_cb_lock);
4915
4916         return cb;
4917 }
4918
4919 int
4920 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
4921                 const struct rte_eth_rxtx_callback *user_cb)
4922 {
4923 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4924         return -ENOTSUP;
4925 #endif
4926         /* Check input parameters. */
4927         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4928         if (user_cb == NULL ||
4929                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
4930                 return -EINVAL;
4931
4932         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4933         struct rte_eth_rxtx_callback *cb;
4934         struct rte_eth_rxtx_callback **prev_cb;
4935         int ret = -EINVAL;
4936
4937         rte_spinlock_lock(&eth_dev_rx_cb_lock);
4938         prev_cb = &dev->post_rx_burst_cbs[queue_id];
4939         for (; *prev_cb != NULL; prev_cb = &cb->next) {
4940                 cb = *prev_cb;
4941                 if (cb == user_cb) {
4942                         /* Remove the user cb from the callback list. */
4943                         __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
4944                         ret = 0;
4945                         break;
4946                 }
4947         }
4948         rte_spinlock_unlock(&eth_dev_rx_cb_lock);
4949
4950         return ret;
4951 }
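
/*
 * Usage sketch (illustrative): removal only unlinks the callback; its
 * memory may still be in use by a data-plane thread, so free it only once
 * no lcore can be executing it. rte_delay_ms() is a blunt stand-in for a
 * proper quiescent-state mechanism.
 *
 *      rte_eth_remove_rx_callback(port_id, queue_id, cb);
 *      rte_delay_ms(10);
 *      rte_free((void *)(uintptr_t)cb);
 */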
4952
4953 int
4954 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
4955                 const struct rte_eth_rxtx_callback *user_cb)
4956 {
4957 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4958         return -ENOTSUP;
4959 #endif
4960         /* Check input parameters. */
4961         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4962         if (user_cb == NULL ||
4963                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
4964                 return -EINVAL;
4965
4966         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4967         int ret = -EINVAL;
4968         struct rte_eth_rxtx_callback *cb;
4969         struct rte_eth_rxtx_callback **prev_cb;
4970
4971         rte_spinlock_lock(&eth_dev_tx_cb_lock);
4972         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
4973         for (; *prev_cb != NULL; prev_cb = &cb->next) {
4974                 cb = *prev_cb;
4975                 if (cb == user_cb) {
4976                         /* Remove the user cb from the callback list. */
4977                         __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
4978                         ret = 0;
4979                         break;
4980                 }
4981         }
4982         rte_spinlock_unlock(&eth_dev_tx_cb_lock);
4983
4984         return ret;
4985 }
4986
4987 int
4988 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4989         struct rte_eth_rxq_info *qinfo)
4990 {
4991         struct rte_eth_dev *dev;
4992
4993         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4994
4995         if (qinfo == NULL)
4996                 return -EINVAL;
4997
4998         dev = &rte_eth_devices[port_id];
4999         if (queue_id >= dev->data->nb_rx_queues) {
5000                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
5001                 return -EINVAL;
5002         }
5003
5004         if (dev->data->rx_queues == NULL ||
5005                         dev->data->rx_queues[queue_id] == NULL) {
5006                 RTE_ETHDEV_LOG(ERR,
5007                                "Rx queue %"PRIu16" of device with port_id=%"
5008                                PRIu16" has not been setup\n",
5009                                queue_id, port_id);
5010                 return -EINVAL;
5011         }
5012
5013         if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
5014                 RTE_ETHDEV_LOG(INFO,
5015                         "Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
5016                         queue_id, port_id);
5017                 return -EINVAL;
5018         }
5019
5020         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
5021
5022         memset(qinfo, 0, sizeof(*qinfo));
5023         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
5024         return 0;
5025 }
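
/*
 * Usage sketch (illustrative): querying a configured Rx queue.
 *
 *      struct rte_eth_rxq_info qinfo;
 *
 *      if (rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo) == 0)
 *              printf("queue %u: %u descriptors, scattered=%u\n",
 *                      queue_id, qinfo.nb_desc, qinfo.scattered_rx);
 */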
5026
5027 int
5028 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5029         struct rte_eth_txq_info *qinfo)
5030 {
5031         struct rte_eth_dev *dev;
5032
5033         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5034
5035         if (qinfo == NULL)
5036                 return -EINVAL;
5037
5038         dev = &rte_eth_devices[port_id];
5039         if (queue_id >= dev->data->nb_tx_queues) {
5040                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
5041                 return -EINVAL;
5042         }
5043
5044         if (dev->data->tx_queues == NULL ||
5045                         dev->data->tx_queues[queue_id] == NULL) {
5046                 RTE_ETHDEV_LOG(ERR,
5047                                "Tx queue %"PRIu16" of device with port_id=%"
5048                                PRIu16" has not been setup\n",
5049                                queue_id, port_id);
5050                 return -EINVAL;
5051         }
5052
5053         if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
5054                 RTE_ETHDEV_LOG(INFO,
5055                         "Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
5056                         queue_id, port_id);
5057                 return -EINVAL;
5058         }
5059
5060         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
5061
5062         memset(qinfo, 0, sizeof(*qinfo));
5063         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
5064
5065         return 0;
5066 }
5067
5068 int
5069 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5070                           struct rte_eth_burst_mode *mode)
5071 {
5072         struct rte_eth_dev *dev;
5073
5074         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5075
5076         if (mode == NULL)
5077                 return -EINVAL;
5078
5079         dev = &rte_eth_devices[port_id];
5080
5081         if (queue_id >= dev->data->nb_rx_queues) {
5082                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
5083                 return -EINVAL;
5084         }
5085
5086         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP);
5087         memset(mode, 0, sizeof(*mode));
5088         return eth_err(port_id,
5089                        dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode));
5090 }
5091
5092 int
5093 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5094                           struct rte_eth_burst_mode *mode)
5095 {
5096         struct rte_eth_dev *dev;
5097
5098         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5099
5100         if (mode == NULL)
5101                 return -EINVAL;
5102
5103         dev = &rte_eth_devices[port_id];
5104
5105         if (queue_id >= dev->data->nb_tx_queues) {
5106                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
5107                 return -EINVAL;
5108         }
5109
5110         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP);
5111         memset(mode, 0, sizeof(*mode));
5112         return eth_err(port_id,
5113                        dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode));
5114 }
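
/*
 * Usage sketch (illustrative): reporting which datapath variant a queue
 * uses (e.g. scalar vs. vector); the Tx variant follows the same pattern.
 *
 *      struct rte_eth_burst_mode mode;
 *
 *      if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0)
 *              printf("port %u Rx burst mode: %s\n", port_id, mode.info);
 */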
5115
5116 int
5117 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
5118                              struct rte_ether_addr *mc_addr_set,
5119                              uint32_t nb_mc_addr)
5120 {
5121         struct rte_eth_dev *dev;
5122
5123         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5124
5125         dev = &rte_eth_devices[port_id];
5126         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
5127         return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
5128                                                 mc_addr_set, nb_mc_addr));
5129 }
5130
5131 int
5132 rte_eth_timesync_enable(uint16_t port_id)
5133 {
5134         struct rte_eth_dev *dev;
5135
5136         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5137         dev = &rte_eth_devices[port_id];
5138
5139         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
5140         return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
5141 }
5142
5143 int
5144 rte_eth_timesync_disable(uint16_t port_id)
5145 {
5146         struct rte_eth_dev *dev;
5147
5148         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5149         dev = &rte_eth_devices[port_id];
5150
5151         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
5152         return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
5153 }
5154
5155 int
5156 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
5157                                    uint32_t flags)
5158 {
5159         struct rte_eth_dev *dev;
5160
5161         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5162         dev = &rte_eth_devices[port_id];
5163
5164         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
5165         return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
5166                                 (dev, timestamp, flags));
5167 }
5168
5169 int
5170 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
5171                                    struct timespec *timestamp)
5172 {
5173         struct rte_eth_dev *dev;
5174
5175         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5176         dev = &rte_eth_devices[port_id];
5177
5178         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
5179         return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
5180                                 (dev, timestamp));
5181 }
5182
5183 int
5184 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
5185 {
5186         struct rte_eth_dev *dev;
5187
5188         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5189         dev = &rte_eth_devices[port_id];
5190
5191         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
5192         return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev,
5193                                                                       delta));
5194 }
5195
5196 int
5197 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
5198 {
5199         struct rte_eth_dev *dev;
5200
5201         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5202         dev = &rte_eth_devices[port_id];
5203
5204         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
5205         return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
5206                                                                 timestamp));
5207 }
5208
5209 int
5210 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
5211 {
5212         struct rte_eth_dev *dev;
5213
5214         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5215         dev = &rte_eth_devices[port_id];
5216
5217         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
5218         return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
5219                                                                 timestamp));
5220 }
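
/*
 * Usage sketch (illustrative): a minimal IEEE 1588 slave-side flow; after
 * enabling timesync, read the Rx timestamp of a PTP frame and step the
 * device clock by an offset computed elsewhere (offset_ns is hypothetical
 * application state).
 *
 *      struct timespec ts;
 *
 *      rte_eth_timesync_enable(port_id);
 *      if (rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0) == 0)
 *              rte_eth_timesync_adjust_time(port_id, offset_ns);
 */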
5221
5222 int
5223 rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
5224 {
5225         struct rte_eth_dev *dev;
5226
5227         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5228         dev = &rte_eth_devices[port_id];
5229
5230         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP);
5231         return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock));
5232 }
5233
5234 int
5235 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
5236 {
5237         struct rte_eth_dev *dev;
5238
5239         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5240
5241         dev = &rte_eth_devices[port_id];
5242         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
5243         return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
5244 }
5245
5246 int
5247 rte_eth_dev_get_eeprom_length(uint16_t port_id)
5248 {
5249         struct rte_eth_dev *dev;
5250
5251         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5252
5253         dev = &rte_eth_devices[port_id];
5254         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
5255         return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
5256 }
5257
5258 int
5259 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
5260 {
5261         struct rte_eth_dev *dev;
5262
5263         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5264
5265         dev = &rte_eth_devices[port_id];
5266         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
5267         return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
5268 }
5269
5270 int
5271 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
5272 {
5273         struct rte_eth_dev *dev;
5274
5275         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5276
5277         dev = &rte_eth_devices[port_id];
5278         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
5279         return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
5280 }
5281
5282 int
5283 rte_eth_dev_get_module_info(uint16_t port_id,
5284                             struct rte_eth_dev_module_info *modinfo)
5285 {
5286         struct rte_eth_dev *dev;
5287
5288         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5289
5290         dev = &rte_eth_devices[port_id];
5291         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP);
5292         return (*dev->dev_ops->get_module_info)(dev, modinfo);
5293 }
5294
5295 int
5296 rte_eth_dev_get_module_eeprom(uint16_t port_id,
5297                               struct rte_dev_eeprom_info *info)
5298 {
5299         struct rte_eth_dev *dev;
5300
5301         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5302
5303         dev = &rte_eth_devices[port_id];
5304         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP);
5305         return (*dev->dev_ops->get_module_eeprom)(dev, info);
5306 }
5307
5308 int
5309 rte_eth_dev_get_dcb_info(uint16_t port_id,
5310                              struct rte_eth_dcb_info *dcb_info)
5311 {
5312         struct rte_eth_dev *dev;
5313
5314         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5315
5316         dev = &rte_eth_devices[port_id];
5317         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
5318
5319         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
5320         return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
5321 }
5322
5323 static void
5324 eth_dev_adjust_nb_desc(uint16_t *nb_desc,
5325                 const struct rte_eth_desc_lim *desc_lim)
5326 {
5327         if (desc_lim->nb_align != 0)
5328                 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
5329
5330         if (desc_lim->nb_max != 0)
5331                 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
5332
5333         *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
5334 }
5335
5336 int
5337 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
5338                                  uint16_t *nb_rx_desc,
5339                                  uint16_t *nb_tx_desc)
5340 {
5341         struct rte_eth_dev_info dev_info;
5342         int ret;
5343
5344         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5345
5346         ret = rte_eth_dev_info_get(port_id, &dev_info);
5347         if (ret != 0)
5348                 return ret;
5349
5350         if (nb_rx_desc != NULL)
5351                 eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
5352
5353         if (nb_tx_desc != NULL)
5354                 eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
5355
5356         return 0;
5357 }
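
/*
 * Usage sketch (illustrative): clamping requested ring sizes to the device
 * limits before queue setup; mb_pool is a hypothetical mempool.
 *
 *      uint16_t nb_rxd = 1024, nb_txd = 1024;
 *
 *      ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
 *      if (ret == 0)
 *              ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd,
 *                              rte_eth_dev_socket_id(port_id), NULL, mb_pool);
 */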
5358
5359 int
5360 rte_eth_dev_hairpin_capability_get(uint16_t port_id,
5361                                    struct rte_eth_hairpin_cap *cap)
5362 {
5363         struct rte_eth_dev *dev;
5364
5365         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5366
5367         dev = &rte_eth_devices[port_id];
5368         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP);
5369         memset(cap, 0, sizeof(*cap));
5370         return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap));
5371 }
5372
5373 int
5374 rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
5375 {
5376         if (dev->data->rx_queue_state[queue_id] ==
5377             RTE_ETH_QUEUE_STATE_HAIRPIN)
5378                 return 1;
5379         return 0;
5380 }
5381
5382 int
5383 rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
5384 {
5385         if (dev->data->tx_queue_state[queue_id] ==
5386             RTE_ETH_QUEUE_STATE_HAIRPIN)
5387                 return 1;
5388         return 0;
5389 }
5390
5391 int
5392 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
5393 {
5394         struct rte_eth_dev *dev;
5395
5396         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5397
5398         if (pool == NULL)
5399                 return -EINVAL;
5400
5401         dev = &rte_eth_devices[port_id];
5402
5403         if (*dev->dev_ops->pool_ops_supported == NULL)
5404                 return 1; /* all pools are supported */
5405
5406         return (*dev->dev_ops->pool_ops_supported)(dev, pool);
5407 }
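
/*
 * Usage sketch (illustrative): checking a mempool ops handler before
 * creating the Rx pool; "ring_mp_mc" is the default ring-based handler,
 * and a negative return means this port cannot use it.
 *
 *      if (rte_eth_dev_pool_ops_supported(port_id, "ring_mp_mc") < 0)
 *              printf("port %u: ring_mp_mc mempool ops unsupported\n", port_id);
 */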
5408
5409 /**
5410  * A set of values to describe the possible states of a switch domain.
5411  */
5412 enum rte_eth_switch_domain_state {
5413         RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
5414         RTE_ETH_SWITCH_DOMAIN_ALLOCATED
5415 };
5416
5417 /**
5418  * Array of switch domains available for allocation. Array is sized to
5419  * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
5420  * ethdev ports in a single process.
5421  */
5422 static struct rte_eth_dev_switch {
5423         enum rte_eth_switch_domain_state state;
5424 } eth_dev_switch_domains[RTE_MAX_ETHPORTS];
5425
5426 int
5427 rte_eth_switch_domain_alloc(uint16_t *domain_id)
5428 {
5429         unsigned int i;
5430
5431         *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
5432
5433         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
5434                 if (eth_dev_switch_domains[i].state ==
5435                         RTE_ETH_SWITCH_DOMAIN_UNUSED) {
5436                         eth_dev_switch_domains[i].state =
5437                                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
5438                         *domain_id = i;
5439                         return 0;
5440                 }
5441         }
5442
5443         return -ENOSPC;
5444 }
5445
5446 int
5447 rte_eth_switch_domain_free(uint16_t domain_id)
5448 {
5449         if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
5450                 domain_id >= RTE_MAX_ETHPORTS)
5451                 return -EINVAL;
5452
5453         if (eth_dev_switch_domains[domain_id].state !=
5454                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
5455                 return -EINVAL;
5456
5457         eth_dev_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;
5458
5459         return 0;
5460 }
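
/*
 * Driver-side sketch (illustrative): a PMD allocating one switch domain
 * per physical device so that all of its representor ports report the
 * same domain id, and releasing it on close.
 *
 *      uint16_t domain_id;
 *
 *      if (rte_eth_switch_domain_alloc(&domain_id) != 0)
 *              return -ENOSPC;
 *      (created ports then carry domain_id in dev_info.switch_info)
 *      rte_eth_switch_domain_free(domain_id);
 */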
5461
5462 static int
5463 eth_dev_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
5464 {
5465         int state;
5466         struct rte_kvargs_pair *pair;
5467         char *letter;
5468
5469         arglist->str = strdup(str_in);
5470         if (arglist->str == NULL)
5471                 return -ENOMEM;
5472
5473         letter = arglist->str;
5474         state = 0;
5475         arglist->count = 0;
5476         pair = &arglist->pairs[0];
5477         while (1) {
5478                 switch (state) {
5479                 case 0: /* Initial */
5480                         if (*letter == '=')
5481                                 return -EINVAL;
5482                         else if (*letter == '\0')
5483                                 return 0;
5484
5485                         state = 1;
5486                         pair->key = letter;
5487                         /* fall-thru */
5488
5489                 case 1: /* Parsing key */
5490                         if (*letter == '=') {
5491                                 *letter = '\0';
5492                                 pair->value = letter + 1;
5493                                 state = 2;
5494                         } else if (*letter == ',' || *letter == '\0')
5495                                 return -EINVAL;
5496                         break;
5497
5498
5499                 case 2: /* Parsing value */
5500                         if (*letter == '[')
5501                                 state = 3;
5502                         else if (*letter == ',') {
5503                                 *letter = '\0';
5504                                 arglist->count++;
5505                                 pair = &arglist->pairs[arglist->count];
5506                                 state = 0;
5507                         } else if (*letter == '\0') {
5508                                 letter--;
5509                                 arglist->count++;
5510                                 pair = &arglist->pairs[arglist->count];
5511                                 state = 0;
5512                         }
5513                         break;
5514
5515                 case 3: /* Parsing list */
5516                         if (*letter == ']')
5517                                 state = 2;
5518                         else if (*letter == '\0')
5519                                 return -EINVAL;
5520                         break;
5521                 }
5522                 letter++;
5523         }
5524 }
5525
5526 int
5527 rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
5528 {
5529         struct rte_kvargs args;
5530         struct rte_kvargs_pair *pair;
5531         unsigned int i;
5532         int result = 0;
5533
5534         memset(eth_da, 0, sizeof(*eth_da));
5535
5536         result = eth_dev_devargs_tokenise(&args, dargs);
5537         if (result < 0)
5538                 goto parse_cleanup;
5539
5540         for (i = 0; i < args.count; i++) {
5541                 pair = &args.pairs[i];
5542                 if (strcmp("representor", pair->key) == 0) {
5543                         result = rte_eth_devargs_parse_list(pair->value,
5544                                 rte_eth_devargs_parse_representor_ports,
5545                                 eth_da);
5546                         if (result < 0)
5547                                 goto parse_cleanup;
5548                 }
5549         }
5550
5551 parse_cleanup:
5552         if (args.str)
5553                 free(args.str);
5554
5555         return result;
5556 }
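
/*
 * Usage sketch (illustrative): parsing a representor devargs value such as
 * "representor=[0-3]" into representor port ids.
 *
 *      struct rte_eth_devargs da;
 *      uint16_t i;
 *
 *      if (rte_eth_devargs_parse("representor=[0-3]", &da) == 0)
 *              for (i = 0; i < da.nb_representor_ports; i++)
 *                      printf("representor %u\n", da.representor_ports[i]);
 */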
5557
5558 static int
5559 eth_dev_handle_port_list(const char *cmd __rte_unused,
5560                 const char *params __rte_unused,
5561                 struct rte_tel_data *d)
5562 {
5563         int port_id;
5564
5565         rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
5566         RTE_ETH_FOREACH_DEV(port_id)
5567                 rte_tel_data_add_array_int(d, port_id);
5568         return 0;
5569 }
5570
5571 static void
5572 eth_dev_add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats,
5573                 const char *stat_name)
5574 {
5575         int q;
5576         struct rte_tel_data *q_data = rte_tel_data_alloc();
             /* rte_tel_data_alloc() may fail; skip these queue stats then. */
             if (q_data == NULL)
                     return;
5577         rte_tel_data_start_array(q_data, RTE_TEL_U64_VAL);
5578         for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++)
5579                 rte_tel_data_add_array_u64(q_data, q_stats[q]);
5580         rte_tel_data_add_dict_container(d, stat_name, q_data, 0);
5581 }
5582
5583 #define ADD_DICT_STAT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s)
5584
5585 static int
5586 eth_dev_handle_port_stats(const char *cmd __rte_unused,
5587                 const char *params,
5588                 struct rte_tel_data *d)
5589 {
5590         struct rte_eth_stats stats;
5591         int port_id, ret;
5592
5593         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5594                 return -1;
5595
5596         port_id = atoi(params);
5597         if (!rte_eth_dev_is_valid_port(port_id))
5598                 return -1;
5599
5600         ret = rte_eth_stats_get(port_id, &stats);
5601         if (ret < 0)
5602                 return -1;
5603
5604         rte_tel_data_start_dict(d);
5605         ADD_DICT_STAT(stats, ipackets);
5606         ADD_DICT_STAT(stats, opackets);
5607         ADD_DICT_STAT(stats, ibytes);
5608         ADD_DICT_STAT(stats, obytes);
5609         ADD_DICT_STAT(stats, imissed);
5610         ADD_DICT_STAT(stats, ierrors);
5611         ADD_DICT_STAT(stats, oerrors);
5612         ADD_DICT_STAT(stats, rx_nombuf);
5613         eth_dev_add_port_queue_stats(d, stats.q_ipackets, "q_ipackets");
5614         eth_dev_add_port_queue_stats(d, stats.q_opackets, "q_opackets");
5615         eth_dev_add_port_queue_stats(d, stats.q_ibytes, "q_ibytes");
5616         eth_dev_add_port_queue_stats(d, stats.q_obytes, "q_obytes");
5617         eth_dev_add_port_queue_stats(d, stats.q_errors, "q_errors");
5618
5619         return 0;
5620 }
5621
5622 static int
5623 eth_dev_handle_port_xstats(const char *cmd __rte_unused,
5624                 const char *params,
5625                 struct rte_tel_data *d)
5626 {
5627         struct rte_eth_xstat *eth_xstats;
5628         struct rte_eth_xstat_name *xstat_names;
5629         int port_id, num_xstats;
5630         int i, ret;
5631         char *end_param;
5632
5633         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5634                 return -1;
5635
5636         port_id = strtoul(params, &end_param, 0);
5637         if (*end_param != '\0')
5638                 RTE_ETHDEV_LOG(NOTICE,
5639                         "Extra parameters passed to ethdev telemetry command, ignoring\n");
5640         if (!rte_eth_dev_is_valid_port(port_id))
5641                 return -1;
5642
5643         num_xstats = rte_eth_xstats_get(port_id, NULL, 0);
5644         if (num_xstats < 0)
5645                 return -1;
5646
5647         /* use one malloc for both names and stats */
5648         eth_xstats = malloc((sizeof(struct rte_eth_xstat) +
5649                         sizeof(struct rte_eth_xstat_name)) * num_xstats);
5650         if (eth_xstats == NULL)
5651                 return -1;
5652         xstat_names = (void *)&eth_xstats[num_xstats];
5653
5654         ret = rte_eth_xstats_get_names(port_id, xstat_names, num_xstats);
5655         if (ret < 0 || ret > num_xstats) {
5656                 free(eth_xstats);
5657                 return -1;
5658         }
5659
5660         ret = rte_eth_xstats_get(port_id, eth_xstats, num_xstats);
5661         if (ret < 0 || ret > num_xstats) {
5662                 free(eth_xstats);
5663                 return -1;
5664         }
5665
5666         rte_tel_data_start_dict(d);
5667         for (i = 0; i < num_xstats; i++)
5668                 rte_tel_data_add_dict_u64(d, xstat_names[i].name,
5669                                 eth_xstats[i].value);
5670         return 0;
5671 }
5672
5673 static int
5674 eth_dev_handle_port_link_status(const char *cmd __rte_unused,
5675                 const char *params,
5676                 struct rte_tel_data *d)
5677 {
5678         static const char *status_str = "status";
5679         int ret, port_id;
5680         struct rte_eth_link link;
5681         char *end_param;
5682
5683         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5684                 return -1;
5685
5686         port_id = strtoul(params, &end_param, 0);
5687         if (*end_param != '\0')
5688                 RTE_ETHDEV_LOG(NOTICE,
5689                         "Extra parameters passed to ethdev telemetry command, ignoring\n");
5690         if (!rte_eth_dev_is_valid_port(port_id))
5691                 return -1;
5692
5693         ret = rte_eth_link_get(port_id, &link);
5694         if (ret < 0)
5695                 return -1;
5696
5697         rte_tel_data_start_dict(d);
5698         if (!link.link_status) {
5699                 rte_tel_data_add_dict_string(d, status_str, "DOWN");
5700                 return 0;
5701         }
5702         rte_tel_data_add_dict_string(d, status_str, "UP");
5703         rte_tel_data_add_dict_u64(d, "speed", link.link_speed);
5704         rte_tel_data_add_dict_string(d, "duplex",
5705                         (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
5706                                 "full-duplex" : "half-duplex");
5707         return 0;
5708 }
5709
5710 int
5711 rte_eth_hairpin_queue_peer_update(uint16_t peer_port, uint16_t peer_queue,
5712                                   struct rte_hairpin_peer_info *cur_info,
5713                                   struct rte_hairpin_peer_info *peer_info,
5714                                   uint32_t direction)
5715 {
5716         struct rte_eth_dev *dev;
5717
5718         /* Current queue information is optional; peer info is mandatory. */
5719         if (peer_info == NULL)
5720                 return -EINVAL;
5721
5722         /* No need to check the validity again. */
5723         dev = &rte_eth_devices[peer_port];
5724         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_update,
5725                                 -ENOTSUP);
5726
5727         return (*dev->dev_ops->hairpin_queue_peer_update)(dev, peer_queue,
5728                                         cur_info, peer_info, direction);
5729 }
5730
5731 int
5732 rte_eth_hairpin_queue_peer_bind(uint16_t cur_port, uint16_t cur_queue,
5733                                 struct rte_hairpin_peer_info *peer_info,
5734                                 uint32_t direction)
5735 {
5736         struct rte_eth_dev *dev;
5737
5738         if (peer_info == NULL)
5739                 return -EINVAL;
5740
5741         /* No need to check the validity again. */
5742         dev = &rte_eth_devices[cur_port];
5743         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_bind,
5744                                 -ENOTSUP);
5745
5746         return (*dev->dev_ops->hairpin_queue_peer_bind)(dev, cur_queue,
5747                                                         peer_info, direction);
5748 }
5749
5750 int
5751 rte_eth_hairpin_queue_peer_unbind(uint16_t cur_port, uint16_t cur_queue,
5752                                   uint32_t direction)
5753 {
5754         struct rte_eth_dev *dev;
5755
5756         /* No need to check the validity again. */
5757         dev = &rte_eth_devices[cur_port];
5758         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_unbind,
5759                                 -ENOTSUP);
5760
5761         return (*dev->dev_ops->hairpin_queue_peer_unbind)(dev, cur_queue,
5762                                                           direction);
5763 }
5764
5765 RTE_LOG_REGISTER(rte_eth_dev_logtype, lib.ethdev, INFO);
5766
5767 RTE_INIT(ethdev_init_telemetry)
5768 {
5769         rte_telemetry_register_cmd("/ethdev/list", eth_dev_handle_port_list,
5770                         "Returns list of available ethdev ports. Takes no parameters");
5771         rte_telemetry_register_cmd("/ethdev/stats", eth_dev_handle_port_stats,
5772                         "Returns the common stats for a port. Parameters: int port_id");
5773         rte_telemetry_register_cmd("/ethdev/xstats", eth_dev_handle_port_xstats,
5774                         "Returns the extended stats for a port. Parameters: int port_id");
5775         rte_telemetry_register_cmd("/ethdev/link_status",
5776                         eth_dev_handle_port_link_status,
5777                         "Returns the link status for a port. Parameters: int port_id");
5778 }
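
/*
 * Usage sketch (illustrative): the commands registered above are served
 * over the telemetry socket and can be exercised with the bundled client:
 *
 *      $ ./usertools/dpdk-telemetry.py
 *      --> /ethdev/stats,0
 *
 * where ",0" is the port_id parameter parsed by the handlers above.
 */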