ethdev: unify prefix for static functions and variables
lib/librte_ethdev/rte_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_kvargs.h>
#include <rte_class.h>
#include <rte_ether.h>
#include <rte_telemetry.h>

#include "rte_ethdev_trace.h"
#include "rte_ethdev.h"
#include "rte_ethdev_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* spinlock for eth device callbacks */
static rte_spinlock_t eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove Rx callbacks */
static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove Tx callbacks */
static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for shared data allocation */
static rte_spinlock_t eth_dev_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

/* Shared memory between primary and secondary processes. */
static struct {
        uint64_t next_owner_id;
        rte_spinlock_t ownership_lock;
        struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
} *eth_dev_shared_data;

static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = {
        {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
        {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
        {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
        {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
        {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
        {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
        {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
        {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
                rx_nombuf)},
};

#define RTE_NB_STATS RTE_DIM(eth_dev_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_rxq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
        {"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS RTE_DIM(eth_dev_rxq_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_opackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)
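
/*
 * Illustrative sketch (not part of the original file): the tables above
 * pair a display name with a byte offset, so a counter is read by adding
 * that offset to the base of a filled statistics structure, the same
 * technique the xstats code applies later in this file:
 *
 *        struct rte_eth_stats stats;
 *        uint64_t val;
 *
 *        rte_eth_stats_get(port_id, &stats);
 *        val = *(uint64_t *)(((char *)&stats) +
 *                        eth_dev_stats_strings[i].offset);
 */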

#define RTE_RX_OFFLOAD_BIT2STR(_name)   \
        { DEV_RX_OFFLOAD_##_name, #_name }

#define RTE_ETH_RX_OFFLOAD_BIT2STR(_name)       \
        { RTE_ETH_RX_OFFLOAD_##_name, #_name }

static const struct {
        uint64_t offload;
        const char *name;
} eth_dev_rx_offload_names[] = {
        RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
        RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
        RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
        RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
        RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
        RTE_RX_OFFLOAD_BIT2STR(SCATTER),
        RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
        RTE_RX_OFFLOAD_BIT2STR(SECURITY),
        RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
        RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
        RTE_ETH_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
};

#undef RTE_RX_OFFLOAD_BIT2STR
#undef RTE_ETH_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name)   \
        { DEV_TX_OFFLOAD_##_name, #_name }

static const struct {
        uint64_t offload;
        const char *name;
} eth_dev_tx_offload_names[] = {
        RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
        RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
        RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
        RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
        RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
        RTE_TX_OFFLOAD_BIT2STR(SECURITY),
        RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
};

#undef RTE_TX_OFFLOAD_BIT2STR

/**
 * The user application callback description.
 *
 * It contains the callback address to be registered by the user
 * application, the pointer to the parameters for the callback,
 * and the event type.
 */
struct rte_eth_dev_callback {
        TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
        rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
        void *cb_arg;                           /**< Parameter for callback */
        void *ret_param;                        /**< Return parameter */
        enum rte_eth_event_type event;          /**< Interrupt event type */
        uint32_t active;                        /**< Callback is executing */
};

enum {
        STAT_QMAP_TX = 0,
        STAT_QMAP_RX
};

int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
        int ret;
        struct rte_devargs devargs = {.args = NULL};
        const char *bus_param_key;
        char *bus_str = NULL;
        char *cls_str = NULL;
        int str_size;

        memset(iter, 0, sizeof(*iter));

        /*
         * The devargs string may use various syntaxes:
         *   - 0000:08:00.0,representor=[1-3]
         *   - pci:0000:06:00.0,representor=[0,5]
         *   - class=eth,mac=00:11:22:33:44:55
         * A new syntax is in development (not yet supported):
         *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
         */

        /*
         * Handle the pure class filter case (i.e. without any bus-level
         * argument), from the future new syntax.
         * rte_devargs_parse() does not yet support the new syntax,
         * which is why this simple case is temporarily parsed here.
         */
#define iter_anybus_str "class=eth,"
        if (strncmp(devargs_str, iter_anybus_str,
                        strlen(iter_anybus_str)) == 0) {
                iter->cls_str = devargs_str + strlen(iter_anybus_str);
                goto end;
        }

        /* Split bus, device and parameters. */
        ret = rte_devargs_parse(&devargs, devargs_str);
        if (ret != 0)
                goto error;

        /*
         * Assume parameters of the old syntax can match only at ethdev level.
         * Extra parameters will be ignored, thanks to the "+" prefix.
         */
        str_size = strlen(devargs.args) + 2;
        cls_str = malloc(str_size);
        if (cls_str == NULL) {
                ret = -ENOMEM;
                goto error;
        }
        ret = snprintf(cls_str, str_size, "+%s", devargs.args);
        if (ret != str_size - 1) {
                ret = -EINVAL;
                goto error;
        }
        iter->cls_str = cls_str;
        free(devargs.args); /* allocated by rte_devargs_parse() */
        devargs.args = NULL;

        iter->bus = devargs.bus;
        if (iter->bus->dev_iterate == NULL) {
                ret = -ENOTSUP;
                goto error;
        }

        /* Convert bus args to new syntax for use with new API dev_iterate. */
        if (strcmp(iter->bus->name, "vdev") == 0) {
                bus_param_key = "name";
        } else if (strcmp(iter->bus->name, "pci") == 0) {
                bus_param_key = "addr";
        } else {
                ret = -ENOTSUP;
                goto error;
        }
        str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
        bus_str = malloc(str_size);
        if (bus_str == NULL) {
                ret = -ENOMEM;
                goto error;
        }
        ret = snprintf(bus_str, str_size, "%s=%s",
                        bus_param_key, devargs.name);
        if (ret != str_size - 1) {
                ret = -EINVAL;
                goto error;
        }
        iter->bus_str = bus_str;

end:
        iter->cls = rte_class_find_by_name("eth");
        return 0;

error:
        if (ret == -ENOTSUP)
                RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n",
                                iter->bus->name);
        free(devargs.args);
        free(bus_str);
        free(cls_str);
        return ret;
}

uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
        if (iter->cls == NULL) /* invalid ethdev iterator */
                return RTE_MAX_ETHPORTS;

        do { /* loop to try all matching rte_device */
                /* If not a pure ethdev filter and */
                if (iter->bus != NULL &&
                                /* not in the middle of rte_eth_dev iteration, */
                                iter->class_device == NULL) {
                        /* get the next rte_device to try. */
                        iter->device = iter->bus->dev_iterate(
                                        iter->device, iter->bus_str, iter);
                        if (iter->device == NULL)
                                break; /* no more rte_device candidates */
                }
                /* A device matches the bus part; check the ethdev part. */
                iter->class_device = iter->cls->dev_iterate(
                                iter->class_device, iter->cls_str, iter);
                if (iter->class_device != NULL)
                        return eth_dev_to_id(iter->class_device); /* match */
        } while (iter->bus != NULL); /* need to try next rte_device */

        /* No more ethdev ports to iterate. */
        rte_eth_iterator_cleanup(iter);
        return RTE_MAX_ETHPORTS;
}

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
        if (iter->bus_str == NULL)
                return; /* nothing to free in pure class filter */
        free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
        free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
        memset(iter, 0, sizeof(*iter));
}
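
/*
 * Usage sketch (illustrative): the three calls above are meant to be
 * chained to walk every port matching a devargs string; the public
 * RTE_ETH_FOREACH_MATCHING_DEV() macro wraps the same sequence.
 *
 *        struct rte_dev_iterator iter;
 *        uint16_t id;
 *
 *        if (rte_eth_iterator_init(&iter, "class=eth,mac=00:11:22:33:44:55") == 0)
 *                for (id = rte_eth_iterator_next(&iter);
 *                     id != RTE_MAX_ETHPORTS;
 *                     id = rte_eth_iterator_next(&iter))
 *                        printf("matched port %u\n", id);
 *
 * The iterator cleans itself up once rte_eth_iterator_next() is exhausted,
 * so an explicit rte_eth_iterator_cleanup() is only needed when stopping
 * early.
 */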

uint16_t
rte_eth_find_next(uint16_t port_id)
{
        while (port_id < RTE_MAX_ETHPORTS &&
                        rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
                port_id++;

        if (port_id >= RTE_MAX_ETHPORTS)
                return RTE_MAX_ETHPORTS;

        return port_id;
}

/*
 * Macro to iterate over all valid ports for internal usage.
 * Note: RTE_ETH_FOREACH_DEV is different because it filters out owned ports.
 */
#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
        for (port_id = rte_eth_find_next(0); \
             port_id < RTE_MAX_ETHPORTS; \
             port_id = rte_eth_find_next(port_id + 1))

uint16_t
rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
{
        port_id = rte_eth_find_next(port_id);
        while (port_id < RTE_MAX_ETHPORTS &&
                        rte_eth_devices[port_id].device != parent)
                port_id = rte_eth_find_next(port_id + 1);

        return port_id;
}

uint16_t
rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
        return rte_eth_find_next_of(port_id,
                        rte_eth_devices[ref_port_id].device);
}

static void
eth_dev_shared_data_prepare(void)
{
        const unsigned flags = 0;
        const struct rte_memzone *mz;

        rte_spinlock_lock(&eth_dev_shared_data_lock);

        if (eth_dev_shared_data == NULL) {
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        /* Allocate port data and ownership shared memory. */
                        mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
                                        sizeof(*eth_dev_shared_data),
                                        rte_socket_id(), flags);
                } else
                        mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
                if (mz == NULL)
                        rte_panic("Cannot allocate ethdev shared data\n");

                eth_dev_shared_data = mz->addr;
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        eth_dev_shared_data->next_owner_id =
                                        RTE_ETH_DEV_NO_OWNER + 1;
                        rte_spinlock_init(&eth_dev_shared_data->ownership_lock);
                        memset(eth_dev_shared_data->data, 0,
                               sizeof(eth_dev_shared_data->data));
                }
        }

        rte_spinlock_unlock(&eth_dev_shared_data_lock);
}
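
/*
 * Multi-process note (illustrative): both process types converge on one
 * named memzone, so ports allocated by the primary are visible to
 * secondaries at the same indexes:
 *
 *        primary:   mz = rte_memzone_reserve("rte_eth_dev_data", size, socket, 0);
 *        secondary: mz = rte_memzone_lookup("rte_eth_dev_data");
 *
 * Both pointers map the same memory, hence the shared ownership lock and
 * per-port data array.
 */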

static bool
eth_dev_is_allocated(const struct rte_eth_dev *ethdev)
{
        return ethdev->data->name[0] != '\0';
}

static struct rte_eth_dev *
eth_dev_allocated(const char *name)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (rte_eth_devices[i].data != NULL &&
                    strcmp(rte_eth_devices[i].data->name, name) == 0)
                        return &rte_eth_devices[i];
        }
        return NULL;
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
        struct rte_eth_dev *ethdev;

        eth_dev_shared_data_prepare();

        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        ethdev = eth_dev_allocated(name);

        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

        return ethdev;
}

static uint16_t
eth_dev_find_free_port(void)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                /* Using shared name field to find a free port. */
                if (eth_dev_shared_data->data[i].name[0] == '\0') {
                        RTE_ASSERT(rte_eth_devices[i].state ==
                                   RTE_ETH_DEV_UNUSED);
                        return i;
                }
        }
        return RTE_MAX_ETHPORTS;
}

static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
{
        struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

        eth_dev->data = &eth_dev_shared_data->data[port_id];

        return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
        uint16_t port_id;
        struct rte_eth_dev *eth_dev = NULL;
        size_t name_len;

        name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
        if (name_len == 0) {
                RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n");
                return NULL;
        }

        if (name_len >= RTE_ETH_NAME_MAX_LEN) {
                RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n");
                return NULL;
        }

        eth_dev_shared_data_prepare();

        /* Synchronize port creation between primary and secondary threads. */
        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        if (eth_dev_allocated(name) != NULL) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethernet device with name %s already allocated\n",
                        name);
                goto unlock;
        }

        port_id = eth_dev_find_free_port();
        if (port_id == RTE_MAX_ETHPORTS) {
                RTE_ETHDEV_LOG(ERR,
                        "Reached maximum number of Ethernet ports\n");
                goto unlock;
        }

        eth_dev = eth_dev_get(port_id);
        strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
        eth_dev->data->port_id = port_id;
        eth_dev->data->mtu = RTE_ETHER_MTU;
        pthread_mutex_init(&eth_dev->data->flow_ops_mutex, NULL);

unlock:
        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

        return eth_dev;
}
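
/*
 * Usage sketch (illustrative): a typical PMD probe path allocates the
 * port, fills in its private data and ops, then announces it. The
 * my_private/my_dev_ops symbols are placeholders, not part of this file.
 *
 *        struct rte_eth_dev *eth_dev = rte_eth_dev_allocate(name);
 *
 *        if (eth_dev == NULL)
 *                return -ENOMEM;
 *        eth_dev->data->dev_private = my_private;
 *        eth_dev->dev_ops = &my_dev_ops;
 *        rte_eth_dev_probing_finish(eth_dev);
 */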

/*
 * Attach to a port already registered by the primary process, which
 * ensures that the same device gets the same port ID in both the
 * primary and secondary processes.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
        uint16_t i;
        struct rte_eth_dev *eth_dev = NULL;

        eth_dev_shared_data_prepare();

        /* Synchronize port attachment to primary port creation and release. */
        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (strcmp(eth_dev_shared_data->data[i].name, name) == 0)
                        break;
        }
        if (i == RTE_MAX_ETHPORTS) {
                RTE_ETHDEV_LOG(ERR,
                        "Device %s is not driven by the primary process\n",
                        name);
        } else {
                eth_dev = eth_dev_get(i);
                RTE_ASSERT(eth_dev->data->port_id == i);
        }

        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
        return eth_dev;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
        if (eth_dev == NULL)
                return -EINVAL;

        eth_dev_shared_data_prepare();

        if (eth_dev->state != RTE_ETH_DEV_UNUSED)
                rte_eth_dev_callback_process(eth_dev,
                                RTE_ETH_EVENT_DESTROY, NULL);

        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        eth_dev->state = RTE_ETH_DEV_UNUSED;
        eth_dev->device = NULL;
        eth_dev->process_private = NULL;
        eth_dev->intr_handle = NULL;
        eth_dev->rx_pkt_burst = NULL;
        eth_dev->tx_pkt_burst = NULL;
        eth_dev->tx_pkt_prepare = NULL;
        eth_dev->rx_queue_count = NULL;
        eth_dev->rx_descriptor_done = NULL;
        eth_dev->rx_descriptor_status = NULL;
        eth_dev->tx_descriptor_status = NULL;
        eth_dev->dev_ops = NULL;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                rte_free(eth_dev->data->rx_queues);
                rte_free(eth_dev->data->tx_queues);
                rte_free(eth_dev->data->mac_addrs);
                rte_free(eth_dev->data->hash_mac_addrs);
                rte_free(eth_dev->data->dev_private);
                pthread_mutex_destroy(&eth_dev->data->flow_ops_mutex);
                memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
        }

        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

        return 0;
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
        if (port_id >= RTE_MAX_ETHPORTS ||
            (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
                return 0;
        else
                return 1;
}

static int
eth_is_valid_owner_id(uint64_t owner_id)
{
        if (owner_id == RTE_ETH_DEV_NO_OWNER ||
            eth_dev_shared_data->next_owner_id <= owner_id)
                return 0;
        return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
        port_id = rte_eth_find_next(port_id);
        while (port_id < RTE_MAX_ETHPORTS &&
                        rte_eth_devices[port_id].data->owner.id != owner_id)
                port_id = rte_eth_find_next(port_id + 1);

        return port_id;
}

int
rte_eth_dev_owner_new(uint64_t *owner_id)
{
        eth_dev_shared_data_prepare();

        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        *owner_id = eth_dev_shared_data->next_owner_id++;

        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
        return 0;
}

static int
eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
                       const struct rte_eth_dev_owner *new_owner)
{
        struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
        struct rte_eth_dev_owner *port_owner;

        if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
                RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
                        port_id);
                return -ENODEV;
        }

        if (!eth_is_valid_owner_id(new_owner->id) &&
            !eth_is_valid_owner_id(old_owner_id)) {
                RTE_ETHDEV_LOG(ERR,
                        "Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
                       old_owner_id, new_owner->id);
                return -EINVAL;
        }

        port_owner = &rte_eth_devices[port_id].data->owner;
        if (port_owner->id != old_owner_id) {
                RTE_ETHDEV_LOG(ERR,
                        "Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
                        port_id, port_owner->name, port_owner->id);
                return -EPERM;
        }

        /* cannot truncate (same structure) */
        strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);

        port_owner->id = new_owner->id;

        RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
                port_id, new_owner->name, new_owner->id);

        return 0;
}

int
rte_eth_dev_owner_set(const uint16_t port_id,
                      const struct rte_eth_dev_owner *owner)
{
        int ret;

        eth_dev_shared_data_prepare();

        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
        return ret;
}

int
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
        const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
                        {.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
        int ret;

        eth_dev_shared_data_prepare();

        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        ret = eth_dev_owner_set(port_id, owner_id, &new_owner);

        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
        return ret;
}

int
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
        uint16_t port_id;
        int ret = 0;

        eth_dev_shared_data_prepare();

        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        if (eth_is_valid_owner_id(owner_id)) {
                for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
                        if (rte_eth_devices[port_id].data->owner.id == owner_id)
                                memset(&rte_eth_devices[port_id].data->owner, 0,
                                       sizeof(struct rte_eth_dev_owner));
                RTE_ETHDEV_LOG(NOTICE,
                        "All ports owned by the %016"PRIx64" identifier have been released\n",
                        owner_id);
        } else {
                RTE_ETHDEV_LOG(ERR,
                               "Invalid owner id=%016"PRIx64"\n",
                               owner_id);
                ret = -EINVAL;
        }

        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

        return ret;
}

int
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
        int ret = 0;
        struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];

        eth_dev_shared_data_prepare();

        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
                RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
                        port_id);
                ret = -ENODEV;
        } else {
                rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
        }

        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
        return ret;
}
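
/*
 * Usage sketch (illustrative): an application component can take
 * ownership of a port so that generic RTE_ETH_FOREACH_DEV() loops in
 * other components skip it; handle_port() is a placeholder.
 *
 *        struct rte_eth_dev_owner owner = { .name = "my-app" };
 *        uint16_t p;
 *
 *        rte_eth_dev_owner_new(&owner.id);
 *        rte_eth_dev_owner_set(port_id, &owner);
 *        RTE_ETH_FOREACH_DEV_OWNED_BY(p, owner.id)
 *                handle_port(p);   (only ports owned by this identifier)
 */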

int
rte_eth_dev_socket_id(uint16_t port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
        return rte_eth_devices[port_id].data->numa_node;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
        return rte_eth_devices[port_id].security_ctx;
}

uint16_t
rte_eth_dev_count_avail(void)
{
        uint16_t p;
        uint16_t count;

        count = 0;

        RTE_ETH_FOREACH_DEV(p)
                count++;

        return count;
}

uint16_t
rte_eth_dev_count_total(void)
{
        uint16_t port, count = 0;

        RTE_ETH_FOREACH_VALID_DEV(port)
                count++;

        return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
        char *tmp;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        if (name == NULL) {
                RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
                return -EINVAL;
        }

        /* shouldn't check 'rte_eth_devices[i].data',
         * because it might be overwritten by VDEV PMD */
        tmp = eth_dev_shared_data->data[port_id].name;
        strcpy(name, tmp);
        return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
        uint32_t pid;

        if (name == NULL) {
                RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
                return -EINVAL;
        }

        RTE_ETH_FOREACH_VALID_DEV(pid)
                if (!strcmp(name, eth_dev_shared_data->data[pid].name)) {
                        *port_id = pid;
                        return 0;
                }

        return -ENODEV;
}

static int
eth_err(uint16_t port_id, int ret)
{
        if (ret == 0)
                return 0;
        if (rte_eth_dev_is_removed(port_id))
                return -EIO;
        return ret;
}

static int
eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_rx_queues;
        void **rxq;
        unsigned i;

        if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
                                sizeof(dev->data->rx_queues[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (dev->data->rx_queues == NULL) {
                        dev->data->nb_rx_queues = 0;
                        return -(ENOMEM);
                }
        } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);
                rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (rxq == NULL)
                        return -(ENOMEM);
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(rxq + old_nb_queues, 0,
                                sizeof(rxq[0]) * new_qs);
                }

                dev->data->rx_queues = rxq;

        } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);

                rte_free(dev->data->rx_queues);
                dev->data->rx_queues = NULL;
        }
        dev->data->nb_rx_queues = nb_queues;
        return 0;
}

static int
eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        uint16_t port_id;

        if (rx_queue_id >= dev->data->nb_rx_queues) {
                port_id = dev->data->port_id;
                RTE_ETHDEV_LOG(ERR,
                               "Invalid Rx queue_id=%u of device with port_id=%u\n",
                               rx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->rx_queues[rx_queue_id] == NULL) {
                port_id = dev->data->port_id;
                RTE_ETHDEV_LOG(ERR,
                               "Queue %u of device with port_id=%u has not been setup\n",
                               rx_queue_id, port_id);
                return -EINVAL;
        }

        return 0;
}

static int
eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
        uint16_t port_id;

        if (tx_queue_id >= dev->data->nb_tx_queues) {
                port_id = dev->data->port_id;
                RTE_ETHDEV_LOG(ERR,
                               "Invalid Tx queue_id=%u of device with port_id=%u\n",
                               tx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->tx_queues[tx_queue_id] == NULL) {
                port_id = dev->data->port_id;
                RTE_ETHDEV_LOG(ERR,
                               "Queue %u of device with port_id=%u has not been setup\n",
                               tx_queue_id, port_id);
                return -EINVAL;
        }

        return 0;
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        if (!dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be started before starting any queue\n",
                        port_id);
                return -EINVAL;
        }

        ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
        if (ret != 0)
                return ret;

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

        if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        rx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->rx_queue_start(dev,
                                                             rx_queue_id));
}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];

        ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
        if (ret != 0)
                return ret;

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

        if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        rx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
}

int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];
        if (!dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be started before starting any queue\n",
                        port_id);
                return -EINVAL;
        }

        ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
        if (ret != 0)
                return ret;

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

        if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        tx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

        dev = &rte_eth_devices[port_id];

        ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
        if (ret != 0)
                return ret;

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

        if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        tx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
}
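
/*
 * Usage sketch (illustrative): the per-queue start/stop calls above pair
 * with the deferred-start queue option, on PMDs that support it, so a
 * queue can be brought up after the port itself:
 *
 *        struct rte_eth_rxconf rxconf = dev_info.default_rxconf;
 *
 *        rxconf.rx_deferred_start = 1;
 *        rte_eth_rx_queue_setup(port_id, 0, nb_desc, socket_id, &rxconf, mp);
 *        rte_eth_dev_start(port_id);              (queue 0 stays stopped)
 *        rte_eth_dev_rx_queue_start(port_id, 0);  (queue 0 starts here)
 */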

static int
eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_tx_queues;
        void **txq;
        unsigned i;

        if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
                                                   sizeof(dev->data->tx_queues[0]) * nb_queues,
                                                   RTE_CACHE_LINE_SIZE);
                if (dev->data->tx_queues == NULL) {
                        dev->data->nb_tx_queues = 0;
                        return -(ENOMEM);
                }
        } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);
                txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
                                  RTE_CACHE_LINE_SIZE);
                if (txq == NULL)
                        return -ENOMEM;
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(txq + old_nb_queues, 0,
                               sizeof(txq[0]) * new_qs);
                }

                dev->data->tx_queues = txq;

        } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);

                rte_free(dev->data->tx_queues);
                dev->data->tx_queues = NULL;
        }
        dev->data->nb_tx_queues = nb_queues;
        return 0;
}

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
        switch (speed) {
        case ETH_SPEED_NUM_10M:
                return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
        case ETH_SPEED_NUM_100M:
                return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
        case ETH_SPEED_NUM_1G:
                return ETH_LINK_SPEED_1G;
        case ETH_SPEED_NUM_2_5G:
                return ETH_LINK_SPEED_2_5G;
        case ETH_SPEED_NUM_5G:
                return ETH_LINK_SPEED_5G;
        case ETH_SPEED_NUM_10G:
                return ETH_LINK_SPEED_10G;
        case ETH_SPEED_NUM_20G:
                return ETH_LINK_SPEED_20G;
        case ETH_SPEED_NUM_25G:
                return ETH_LINK_SPEED_25G;
        case ETH_SPEED_NUM_40G:
                return ETH_LINK_SPEED_40G;
        case ETH_SPEED_NUM_50G:
                return ETH_LINK_SPEED_50G;
        case ETH_SPEED_NUM_56G:
                return ETH_LINK_SPEED_56G;
        case ETH_SPEED_NUM_100G:
                return ETH_LINK_SPEED_100G;
        case ETH_SPEED_NUM_200G:
                return ETH_LINK_SPEED_200G;
        default:
                return 0;
        }
}
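
/*
 * Usage sketch (illustrative): building a fixed-speed link configuration
 * from a numeric speed, e.g. forcing 10G full duplex:
 *
 *        struct rte_eth_conf conf = { 0 };
 *
 *        conf.link_speeds = ETH_LINK_SPEED_FIXED |
 *                rte_eth_speed_bitflag(ETH_SPEED_NUM_10G, ETH_LINK_FULL_DUPLEX);
 */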

const char *
rte_eth_dev_rx_offload_name(uint64_t offload)
{
        const char *name = "UNKNOWN";
        unsigned int i;

        for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i) {
                if (offload == eth_dev_rx_offload_names[i].offload) {
                        name = eth_dev_rx_offload_names[i].name;
                        break;
                }
        }

        return name;
}

const char *
rte_eth_dev_tx_offload_name(uint64_t offload)
{
        const char *name = "UNKNOWN";
        unsigned int i;

        for (i = 0; i < RTE_DIM(eth_dev_tx_offload_names); ++i) {
                if (offload == eth_dev_tx_offload_names[i].offload) {
                        name = eth_dev_tx_offload_names[i].name;
                        break;
                }
        }

        return name;
}
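
/*
 * Usage sketch (illustrative): decoding a whole capability bitmask one
 * bit at a time with the name helpers above:
 *
 *        uint64_t caps = dev_info.rx_offload_capa;
 *
 *        while (caps != 0) {
 *                uint64_t bit = 1ULL << __builtin_ctzll(caps);
 *
 *                printf("%s\n", rte_eth_dev_rx_offload_name(bit));
 *                caps &= ~bit;
 *        }
 */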

static inline int
eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
                   uint32_t max_rx_pkt_len, uint32_t dev_info_size)
{
        int ret = 0;

        if (dev_info_size == 0) {
                if (config_size != max_rx_pkt_len) {
                        RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
                                       " %u != %u is not allowed\n",
                                       port_id, config_size, max_rx_pkt_len);
                        ret = -EINVAL;
                }
        } else if (config_size > dev_info_size) {
                RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
                               "> max allowed value %u\n", port_id, config_size,
                               dev_info_size);
                ret = -EINVAL;
        } else if (config_size < RTE_ETHER_MIN_LEN) {
                RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
                               "< min allowed value %u\n", port_id, config_size,
                               (unsigned int)RTE_ETHER_MIN_LEN);
                ret = -EINVAL;
        }
        return ret;
}

/*
 * Validate offloads that are requested through rte_eth_dev_configure against
 * the offloads successfully set by the Ethernet device.
 *
 * @param port_id
 *   The port identifier of the Ethernet device.
 * @param req_offloads
 *   The offloads that have been requested through `rte_eth_dev_configure`.
 * @param set_offloads
 *   The offloads successfully set by the Ethernet device.
 * @param offload_type
 *   The offload type, i.e. the Rx/Tx string.
 * @param offload_name
 *   The function that prints the offload name.
 * @return
 *   - (0) if validation is successful.
 *   - (-EINVAL) if a requested offload has been silently disabled.
 */
static int
eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads,
                  uint64_t set_offloads, const char *offload_type,
                  const char *(*offload_name)(uint64_t))
{
        uint64_t offloads_diff = req_offloads ^ set_offloads;
        uint64_t offload;
        int ret = 0;

        while (offloads_diff != 0) {
                /* Check if any offload is requested but not enabled. */
                offload = 1ULL << __builtin_ctzll(offloads_diff);
                if (offload & req_offloads) {
                        RTE_ETHDEV_LOG(ERR,
                                "Port %u failed to enable %s offload %s\n",
                                port_id, offload_type, offload_name(offload));
                        ret = -EINVAL;
                }

                /* Check if any offload is enabled without being requested. */
                if (offload & set_offloads) {
                        RTE_ETHDEV_LOG(DEBUG,
                                "Port %u %s offload %s is not requested but enabled\n",
                                port_id, offload_type, offload_name(offload));
                }

                offloads_diff &= ~offload;
        }

        return ret;
}
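
/*
 * Worked example for the XOR diff above: with req_offloads = 0b0110 and
 * set_offloads = 0b0101, offloads_diff = 0b0011. Bit 1 is requested but
 * not set, so the error path fires; bit 0 is set but was not requested,
 * which is only reported at DEBUG level.
 */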
1285
1286 int
1287 rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
1288                       const struct rte_eth_conf *dev_conf)
1289 {
1290         struct rte_eth_dev *dev;
1291         struct rte_eth_dev_info dev_info;
1292         struct rte_eth_conf orig_conf;
1293         int diag;
1294         int ret;
1295
1296         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1297
1298         dev = &rte_eth_devices[port_id];
1299
1300         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
1301
1302         if (dev->data->dev_started) {
1303                 RTE_ETHDEV_LOG(ERR,
1304                         "Port %u must be stopped to allow configuration\n",
1305                         port_id);
1306                 return -EBUSY;
1307         }
1308
1309          /* Store original config, as rollback required on failure */
1310         memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));
1311
1312         /*
1313          * Copy the dev_conf parameter into the dev structure.
1314          * rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get
1315          */
1316         if (dev_conf != &dev->data->dev_conf)
1317                 memcpy(&dev->data->dev_conf, dev_conf,
1318                        sizeof(dev->data->dev_conf));
1319
1320         ret = rte_eth_dev_info_get(port_id, &dev_info);
1321         if (ret != 0)
1322                 goto rollback;
1323
1324         /* If number of queues specified by application for both Rx and Tx is
1325          * zero, use driver preferred values. This cannot be done individually
1326          * as it is valid for either Tx or Rx (but not both) to be zero.
1327          * If driver does not provide any preferred valued, fall back on
1328          * EAL defaults.
1329          */
1330         if (nb_rx_q == 0 && nb_tx_q == 0) {
1331                 nb_rx_q = dev_info.default_rxportconf.nb_queues;
1332                 if (nb_rx_q == 0)
1333                         nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
1334                 nb_tx_q = dev_info.default_txportconf.nb_queues;
1335                 if (nb_tx_q == 0)
1336                         nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
1337         }
1338
1339         if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
1340                 RTE_ETHDEV_LOG(ERR,
1341                         "Number of RX queues requested (%u) is greater than max supported(%d)\n",
1342                         nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
1343                 ret = -EINVAL;
1344                 goto rollback;
1345         }
1346
1347         if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
1348                 RTE_ETHDEV_LOG(ERR,
1349                         "Number of TX queues requested (%u) is greater than max supported(%d)\n",
1350                         nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
1351                 ret = -EINVAL;
1352                 goto rollback;
1353         }
1354
1355         /*
1356          * Check that the numbers of RX and TX queues are not greater
1357          * than the maximum number of RX and TX queues supported by the
1358          * configured device.
1359          */
1360         if (nb_rx_q > dev_info.max_rx_queues) {
1361                 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
1362                         port_id, nb_rx_q, dev_info.max_rx_queues);
1363                 ret = -EINVAL;
1364                 goto rollback;
1365         }
1366
1367         if (nb_tx_q > dev_info.max_tx_queues) {
1368                 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
1369                         port_id, nb_tx_q, dev_info.max_tx_queues);
1370                 ret = -EINVAL;
1371                 goto rollback;
1372         }
1373
1374         /* Check that the device supports requested interrupts */
1375         if ((dev_conf->intr_conf.lsc == 1) &&
1376                         (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
1377                 RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
1378                         dev->device->driver->name);
1379                 ret = -EINVAL;
1380                 goto rollback;
1381         }
1382         if ((dev_conf->intr_conf.rmv == 1) &&
1383                         (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
1384                 RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
1385                         dev->device->driver->name);
1386                 ret = -EINVAL;
1387                 goto rollback;
1388         }
1389
1390         /*
1391          * If jumbo frames are enabled, check that the maximum RX packet
1392          * length is supported by the configured device.
1393          */
1394         if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
1395                 if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) {
1396                         RTE_ETHDEV_LOG(ERR,
1397                                 "Ethdev port_id=%u max_rx_pkt_len %u > max valid value %u\n",
1398                                 port_id, dev_conf->rxmode.max_rx_pkt_len,
1399                                 dev_info.max_rx_pktlen);
1400                         ret = -EINVAL;
1401                         goto rollback;
1402                 } else if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN) {
1403                         RTE_ETHDEV_LOG(ERR,
1404                                 "Ethdev port_id=%u max_rx_pkt_len %u < min valid value %u\n",
1405                                 port_id, dev_conf->rxmode.max_rx_pkt_len,
1406                                 (unsigned int)RTE_ETHER_MIN_LEN);
1407                         ret = -EINVAL;
1408                         goto rollback;
1409                 }
1410         } else {
1411                 if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN ||
1412                         dev_conf->rxmode.max_rx_pkt_len > RTE_ETHER_MAX_LEN)
1413                         /* Use default value */
1414                         dev->data->dev_conf.rxmode.max_rx_pkt_len =
1415                                                         RTE_ETHER_MAX_LEN;
1416         }
1417
1418         /*
1419          * If LRO is enabled, check that the maximum aggregated packet
1420          * size is supported by the configured device.
1421          */
1422         if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
1423                 if (dev_conf->rxmode.max_lro_pkt_size == 0)
1424                         dev->data->dev_conf.rxmode.max_lro_pkt_size =
1425                                 dev->data->dev_conf.rxmode.max_rx_pkt_len;
1426                 ret = eth_dev_check_lro_pkt_size(port_id,
1427                                 dev->data->dev_conf.rxmode.max_lro_pkt_size,
1428                                 dev->data->dev_conf.rxmode.max_rx_pkt_len,
1429                                 dev_info.max_lro_pkt_size);
1430                 if (ret != 0)
1431                         goto rollback;
1432         }
1433
1434         /* Any requested offloading must be within its device capabilities */
1435         if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
1436              dev_conf->rxmode.offloads) {
1437                 RTE_ETHDEV_LOG(ERR,
1438                         "Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
1439                         "capabilities 0x%"PRIx64" in %s()\n",
1440                         port_id, dev_conf->rxmode.offloads,
1441                         dev_info.rx_offload_capa,
1442                         __func__);
1443                 ret = -EINVAL;
1444                 goto rollback;
1445         }
1446         if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
1447              dev_conf->txmode.offloads) {
1448                 RTE_ETHDEV_LOG(ERR,
1449                         "Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
1450                         "capabilities 0x%"PRIx64" in %s()\n",
1451                         port_id, dev_conf->txmode.offloads,
1452                         dev_info.tx_offload_capa,
1453                         __func__);
1454                 ret = -EINVAL;
1455                 goto rollback;
1456         }
1457
1458         dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
1459                 rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);
1460
1461         /* Check that device supports requested rss hash functions. */
1462         if ((dev_info.flow_type_rss_offloads |
1463              dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
1464             dev_info.flow_type_rss_offloads) {
1465                 RTE_ETHDEV_LOG(ERR,
1466                         "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
1467                         port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
1468                         dev_info.flow_type_rss_offloads);
1469                 ret = -EINVAL;
1470                 goto rollback;
1471         }
1472
1473         /* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
1474         if (((dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) == 0) &&
1475             (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
1476                 RTE_ETHDEV_LOG(ERR,
1477                         "Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
1478                         port_id,
1479                         rte_eth_dev_rx_offload_name(DEV_RX_OFFLOAD_RSS_HASH));
1480                 ret = -EINVAL;
1481                 goto rollback;
1482         }
1483
1484         /*
1485          * Set up the new number of Rx/Tx queues and reconfigure the device.
1486          */
1487         diag = eth_dev_rx_queue_config(dev, nb_rx_q);
1488         if (diag != 0) {
1489                 RTE_ETHDEV_LOG(ERR,
1490                         "Port %u eth_dev_rx_queue_config = %d\n",
1491                         port_id, diag);
1492                 ret = diag;
1493                 goto rollback;
1494         }
1495
1496         diag = eth_dev_tx_queue_config(dev, nb_tx_q);
1497         if (diag != 0) {
1498                 RTE_ETHDEV_LOG(ERR,
1499                         "Port %u eth_dev_tx_queue_config = %d\n",
1500                         port_id, diag);
1501                 eth_dev_rx_queue_config(dev, 0);
1502                 ret = diag;
1503                 goto rollback;
1504         }
1505
1506         diag = (*dev->dev_ops->dev_configure)(dev);
1507         if (diag != 0) {
1508                 RTE_ETHDEV_LOG(ERR, "Port %u dev_configure = %d\n",
1509                         port_id, diag);
1510                 ret = eth_err(port_id, diag);
1511                 goto reset_queues;
1512         }
1513
1514         /* Initialize Rx profiling if enabled at compilation time. */
1515         diag = __rte_eth_dev_profile_init(port_id, dev);
1516         if (diag != 0) {
1517                 RTE_ETHDEV_LOG(ERR, "Port %u __rte_eth_dev_profile_init = %d\n",
1518                         port_id, diag);
1519                 ret = eth_err(port_id, diag);
1520                 goto reset_queues;
1521         }
1522
1523         /* Validate Rx offloads. */
1524         diag = eth_dev_validate_offloads(port_id,
1525                         dev_conf->rxmode.offloads,
1526                         dev->data->dev_conf.rxmode.offloads, "Rx",
1527                         rte_eth_dev_rx_offload_name);
1528         if (diag != 0) {
1529                 ret = diag;
1530                 goto reset_queues;
1531         }
1532
1533         /* Validate Tx offloads. */
1534         diag = eth_dev_validate_offloads(port_id,
1535                         dev_conf->txmode.offloads,
1536                         dev->data->dev_conf.txmode.offloads, "Tx",
1537                         rte_eth_dev_tx_offload_name);
1538         if (diag != 0) {
1539                 ret = diag;
1540                 goto reset_queues;
1541         }
1542
1543         rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0);
1544         return 0;
1545 reset_queues:
1546         eth_dev_rx_queue_config(dev, 0);
1547         eth_dev_tx_queue_config(dev, 0);
1548 rollback:
1549         memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
1550
1551         rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret);
1552         return ret;
1553 }
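
/*
 * A minimal sketch (illustrative only, not part of the library) of the
 * application-side sequence this function is designed for. The port_id
 * variable is assumed to be in scope; queue counts are example values.
 *
 *	struct rte_eth_conf port_conf;
 *
 *	memset(&port_conf, 0, sizeof(port_conf));
 *	port_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
 *	// one Rx and one Tx queue; port-level offloads left at zero
 *	if (rte_eth_dev_configure(port_id, 1, 1, &port_conf) != 0)
 *		rte_exit(EXIT_FAILURE, "Cannot configure port %u\n", port_id);
 */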
1554
1555 void
1556 rte_eth_dev_internal_reset(struct rte_eth_dev *dev)
1557 {
1558         if (dev->data->dev_started) {
1559                 RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
1560                         dev->data->port_id);
1561                 return;
1562         }
1563
1564         eth_dev_rx_queue_config(dev, 0);
1565         eth_dev_tx_queue_config(dev, 0);
1566
1567         memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
1568 }
1569
1570 static void
1571 eth_dev_mac_restore(struct rte_eth_dev *dev,
1572                         struct rte_eth_dev_info *dev_info)
1573 {
1574         struct rte_ether_addr *addr;
1575         uint16_t i;
1576         uint32_t pool = 0;
1577         uint64_t pool_mask;
1578
1579         /* replay MAC address configuration including default MAC */
1580         addr = &dev->data->mac_addrs[0];
1581         if (*dev->dev_ops->mac_addr_set != NULL)
1582                 (*dev->dev_ops->mac_addr_set)(dev, addr);
1583         else if (*dev->dev_ops->mac_addr_add != NULL)
1584                 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);
1585
1586         if (*dev->dev_ops->mac_addr_add != NULL) {
1587                 for (i = 1; i < dev_info->max_mac_addrs; i++) {
1588                         addr = &dev->data->mac_addrs[i];
1589
1590                         /* skip zero address */
1591                         if (rte_is_zero_ether_addr(addr))
1592                                 continue;
1593
1594                         pool = 0;
1595                         pool_mask = dev->data->mac_pool_sel[i];
1596
1597                         do {
1598                                 if (pool_mask & 1ULL)
1599                                         (*dev->dev_ops->mac_addr_add)(dev,
1600                                                 addr, i, pool);
1601                                 pool_mask >>= 1;
1602                                 pool++;
1603                         } while (pool_mask);
1604                 }
1605         }
1606 }
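
/*
 * Illustrative walk-through of the pool_mask loop above: mac_pool_sel[i]
 * is a bitmask of the VMDq pools that address i was added to, so a value
 * of 0x5 replays the address into pools 0 and 2:
 *
 *	pool_mask = 0x5 -> bit 0 set:   mac_addr_add(dev, addr, i, 0)
 *	pool_mask = 0x2 -> bit 0 clear: pool 1 skipped
 *	pool_mask = 0x1 -> bit 0 set:   mac_addr_add(dev, addr, i, 2)
 *	pool_mask = 0x0 -> loop ends
 */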
1607
1608 static int
1609 eth_dev_config_restore(struct rte_eth_dev *dev,
1610                 struct rte_eth_dev_info *dev_info, uint16_t port_id)
1611 {
1612         int ret;
1613
1614         if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
1615                 eth_dev_mac_restore(dev, dev_info);
1616
1617         /* replay promiscuous configuration */
1618         /*
1619          * Replay promiscuous configuration. Use the driver callbacks
1620          * directly: port_id is already validated here, and we want to
1621          * bypass the short-circuit taken on an unchanged value.
1622          */
1623             *dev->dev_ops->promiscuous_enable != NULL) {
1624                 ret = eth_err(port_id,
1625                               (*dev->dev_ops->promiscuous_enable)(dev));
1626                 if (ret != 0 && ret != -ENOTSUP) {
1627                         RTE_ETHDEV_LOG(ERR,
1628                                 "Failed to enable promiscuous mode for device (port %u): %s\n",
1629                                 port_id, rte_strerror(-ret));
1630                         return ret;
1631                 }
1632         } else if (rte_eth_promiscuous_get(port_id) == 0 &&
1633                    *dev->dev_ops->promiscuous_disable != NULL) {
1634                 ret = eth_err(port_id,
1635                               (*dev->dev_ops->promiscuous_disable)(dev));
1636                 if (ret != 0 && ret != -ENOTSUP) {
1637                         RTE_ETHDEV_LOG(ERR,
1638                                 "Failed to disable promiscuous mode for device (port %u): %s\n",
1639                                 port_id, rte_strerror(-ret));
1640                         return ret;
1641                 }
1642         }
1643
1644         /*
1645          * Replay allmulticast configuration. Use the driver callbacks
1646          * directly: port_id is already validated here, and we want to
1647          * bypass the short-circuit taken on an unchanged value.
1648          */
1649         if (rte_eth_allmulticast_get(port_id) == 1 &&
1650             *dev->dev_ops->allmulticast_enable != NULL) {
1651                 ret = eth_err(port_id,
1652                               (*dev->dev_ops->allmulticast_enable)(dev));
1653                 if (ret != 0 && ret != -ENOTSUP) {
1654                         RTE_ETHDEV_LOG(ERR,
1655                                 "Failed to enable allmulticast mode for device (port %u): %s\n",
1656                                 port_id, rte_strerror(-ret));
1657                         return ret;
1658                 }
1659         } else if (rte_eth_allmulticast_get(port_id) == 0 &&
1660                    *dev->dev_ops->allmulticast_disable != NULL) {
1661                 ret = eth_err(port_id,
1662                               (*dev->dev_ops->allmulticast_disable)(dev));
1663                 if (ret != 0 && ret != -ENOTSUP) {
1664                         RTE_ETHDEV_LOG(ERR,
1665                                 "Failed to disable allmulticast mode for device (port %u): %s\n",
1666                                 port_id, rte_strerror(-ret));
1667                         return ret;
1668                 }
1669         }
1670
1671         return 0;
1672 }
1673
1674 int
1675 rte_eth_dev_start(uint16_t port_id)
1676 {
1677         struct rte_eth_dev *dev;
1678         struct rte_eth_dev_info dev_info;
1679         int diag;
1680         int ret, ret_stop;
1681
1682         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1683
1684         dev = &rte_eth_devices[port_id];
1685
1686         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1687
1688         if (dev->data->dev_started != 0) {
1689                 RTE_ETHDEV_LOG(INFO,
1690                         "Device with port_id=%"PRIu16" already started\n",
1691                         port_id);
1692                 return 0;
1693         }
1694
1695         ret = rte_eth_dev_info_get(port_id, &dev_info);
1696         if (ret != 0)
1697                 return ret;
1698
1699         /* Restore the MAC address now if the device does not support live change */
1700         if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
1701                 eth_dev_mac_restore(dev, &dev_info);
1702
1703         diag = (*dev->dev_ops->dev_start)(dev);
1704         if (diag == 0)
1705                 dev->data->dev_started = 1;
1706         else
1707                 return eth_err(port_id, diag);
1708
1709         ret = eth_dev_config_restore(dev, &dev_info, port_id);
1710         if (ret != 0) {
1711                 RTE_ETHDEV_LOG(ERR,
1712                         "Error during restoring configuration for device (port %u): %s\n",
1713                         port_id, rte_strerror(-ret));
1714                 ret_stop = rte_eth_dev_stop(port_id);
1715                 if (ret_stop != 0) {
1716                         RTE_ETHDEV_LOG(ERR,
1717                                 "Failed to stop device (port %u): %s\n",
1718                                 port_id, rte_strerror(-ret_stop));
1719                 }
1720
1721                 return ret;
1722         }
1723
1724         if (dev->data->dev_conf.intr_conf.lsc == 0) {
1725                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
1726                 (*dev->dev_ops->link_update)(dev, 0);
1727         }
1728
1729         rte_ethdev_trace_start(port_id);
1730         return 0;
1731 }
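
/*
 * Usage sketch (illustrative, assumes port_id is configured and its
 * queues are set up): start the port, then poll the link with an
 * arbitrary 9 second bound before giving up on link-up.
 *
 *	struct rte_eth_link link;
 *	int n;
 *
 *	if (rte_eth_dev_start(port_id) != 0)
 *		rte_exit(EXIT_FAILURE, "Cannot start port %u\n", port_id);
 *	for (n = 0; n < 90; n++) {
 *		if (rte_eth_link_get_nowait(port_id, &link) == 0 &&
 *		    link.link_status == ETH_LINK_UP)
 *			break;
 *		rte_delay_ms(100);
 *	}
 */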
1732
1733 int
1734 rte_eth_dev_stop(uint16_t port_id)
1735 {
1736         struct rte_eth_dev *dev;
1737         int ret;
1738
1739         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1740         dev = &rte_eth_devices[port_id];
1741
1742         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_stop, -ENOTSUP);
1743
1744         if (dev->data->dev_started == 0) {
1745                 RTE_ETHDEV_LOG(INFO,
1746                         "Device with port_id=%"PRIu16" already stopped\n",
1747                         port_id);
1748                 return 0;
1749         }
1750
1751         dev->data->dev_started = 0;
1752         ret = (*dev->dev_ops->dev_stop)(dev);
1753         rte_ethdev_trace_stop(port_id, ret);
1754
1755         return ret;
1756 }
1757
1758 int
1759 rte_eth_dev_set_link_up(uint16_t port_id)
1760 {
1761         struct rte_eth_dev *dev;
1762
1763         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1764
1765         dev = &rte_eth_devices[port_id];
1766
1767         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1768         return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
1769 }
1770
1771 int
1772 rte_eth_dev_set_link_down(uint16_t port_id)
1773 {
1774         struct rte_eth_dev *dev;
1775
1776         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1777
1778         dev = &rte_eth_devices[port_id];
1779
1780         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1781         return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
1782 }
1783
1784 int
1785 rte_eth_dev_close(uint16_t port_id)
1786 {
1787         struct rte_eth_dev *dev;
1788         int firsterr, binerr;
1789         int *lasterr = &firsterr;
1790
1791         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1792         dev = &rte_eth_devices[port_id];
1793
1794         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
1795         *lasterr = (*dev->dev_ops->dev_close)(dev);
1796         if (*lasterr != 0)
1797                 lasterr = &binerr;
1798
1799         rte_ethdev_trace_close(port_id);
1800         *lasterr = rte_eth_dev_release_port(dev);
1801
1802         return eth_err(port_id, firsterr);
1803 }
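
/*
 * Orderly-teardown sketch (illustrative, port_id assumed in scope).
 * Because this function also releases the port, port_id must not be
 * used again after a successful close.
 *
 *	int ret;
 *
 *	ret = rte_eth_dev_stop(port_id);
 *	if (ret != 0)
 *		RTE_LOG(ERR, USER1, "stop: %s\n", rte_strerror(-ret));
 *	ret = rte_eth_dev_close(port_id);
 *	if (ret != 0)
 *		RTE_LOG(ERR, USER1, "close: %s\n", rte_strerror(-ret));
 */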
1804
1805 int
1806 rte_eth_dev_reset(uint16_t port_id)
1807 {
1808         struct rte_eth_dev *dev;
1809         int ret;
1810
1811         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1812         dev = &rte_eth_devices[port_id];
1813
1814         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
1815
1816         ret = rte_eth_dev_stop(port_id);
1817         if (ret != 0) {
1818                 RTE_ETHDEV_LOG(ERR,
1819                         "Failed to stop device (port %u) before reset: %s - ignore\n",
1820                         port_id, rte_strerror(-ret));
1821         }
1822         ret = dev->dev_ops->dev_reset(dev);
1823
1824         return eth_err(port_id, ret);
1825 }
1826
1827 int
1828 rte_eth_dev_is_removed(uint16_t port_id)
1829 {
1830         struct rte_eth_dev *dev;
1831         int ret;
1832
1833         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
1834
1835         dev = &rte_eth_devices[port_id];
1836
1837         if (dev->state == RTE_ETH_DEV_REMOVED)
1838                 return 1;
1839
1840         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);
1841
1842         ret = dev->dev_ops->is_removed(dev);
1843         if (ret != 0)
1844                 /* Device is physically removed. */
1845                 dev->state = RTE_ETH_DEV_REMOVED;
1846
1847         return ret;
1848 }
1849
1850 static int
1851 rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg,
1852                              uint16_t n_seg, uint32_t *mbp_buf_size,
1853                              const struct rte_eth_dev_info *dev_info)
1854 {
1855         const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa;
1856         struct rte_mempool *mp_first;
1857         uint32_t offset_mask;
1858         uint16_t seg_idx;
1859
1860         if (n_seg > seg_capa->max_nseg) {
1861                 RTE_ETHDEV_LOG(ERR,
1862                                "Requested Rx segments %u exceed supported %u\n",
1863                                n_seg, seg_capa->max_nseg);
1864                 return -EINVAL;
1865         }
1866         /*
1867          * Check the sizes and offsets against buffer sizes
1868          * for each segment specified in the extended configuration.
1869          */
1870         mp_first = rx_seg[0].mp;
1871         offset_mask = (1u << seg_capa->offset_align_log2) - 1;
1872         for (seg_idx = 0; seg_idx < n_seg; seg_idx++) {
1873                 struct rte_mempool *mpl = rx_seg[seg_idx].mp;
1874                 uint32_t length = rx_seg[seg_idx].length;
1875                 uint32_t offset = rx_seg[seg_idx].offset;
1876
1877                 if (mpl == NULL) {
1878                         RTE_ETHDEV_LOG(ERR, "null mempool pointer\n");
1879                         return -EINVAL;
1880                 }
1881                 if (seg_idx != 0 && mp_first != mpl &&
1882                     seg_capa->multi_pools == 0) {
1883                         RTE_ETHDEV_LOG(ERR, "Receiving to multiple pools is not supported\n");
1884                         return -ENOTSUP;
1885                 }
1886                 if (offset != 0) {
1887                         if (seg_capa->offset_allowed == 0) {
1888                                 RTE_ETHDEV_LOG(ERR, "Rx segmentation with offset is not supported\n");
1889                                 return -ENOTSUP;
1890                         }
1891                         if (offset & offset_mask) {
1892                                 RTE_ETHDEV_LOG(ERR, "Invalid Rx segmentation offset %u, must be aligned to a 2^%u byte boundary\n",
1893                                                offset,
1894                                                seg_capa->offset_align_log2);
1895                                 return -EINVAL;
1896                         }
1897                 }
1898                 if (mpl->private_data_size <
1899                         sizeof(struct rte_pktmbuf_pool_private)) {
1900                         RTE_ETHDEV_LOG(ERR,
1901                                        "%s private_data_size %u < %u\n",
1902                                        mpl->name, mpl->private_data_size,
1903                                        (unsigned int)sizeof
1904                                         (struct rte_pktmbuf_pool_private));
1905                         return -ENOSPC;
1906                 }
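                /* The first segment's buffer must also hold the mbuf headroom. */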
1907                 offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM;
1908                 *mbp_buf_size = rte_pktmbuf_data_room_size(mpl);
1909                 length = length != 0 ? length : *mbp_buf_size;
1910                 if (*mbp_buf_size < length + offset) {
1911                         RTE_ETHDEV_LOG(ERR,
1912                                        "%s mbuf_data_room_size %u < %u (segment length=%u + segment offset=%u)\n",
1913                                        mpl->name, *mbp_buf_size,
1914                                        length + offset, length, offset);
1915                         return -EINVAL;
1916                 }
1917         }
1918         return 0;
1919 }
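
/*
 * Illustrative example of a two-segment split configuration that this
 * helper would validate. A sketch only, assuming the buffer-split API
 * as declared in this release's rte_ethdev.h; hdr_mp and pay_mp are
 * hypothetical application-created mempools, and dev_info was filled
 * by rte_eth_dev_info_get():
 *
 *	union rte_eth_rxseg segs[2];
 *	struct rte_eth_rxconf rxconf = dev_info.default_rxconf;
 *
 *	segs[0].split.mp = hdr_mp;	// headers land here
 *	segs[0].split.length = 64;
 *	segs[0].split.offset = 0;
 *	segs[1].split.mp = pay_mp;	// rest of the packet
 *	segs[1].split.length = 0;	// 0 = use the whole buffer
 *	segs[1].split.offset = 0;
 *	rxconf.rx_seg = segs;
 *	rxconf.rx_nseg = 2;
 *	rxconf.offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
 *	// mp must be NULL to select the segmented path:
 *	ret = rte_eth_rx_queue_setup(port_id, 0, 512, socket_id,
 *				     &rxconf, NULL);
 */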
1920
1921 int
1922 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1923                        uint16_t nb_rx_desc, unsigned int socket_id,
1924                        const struct rte_eth_rxconf *rx_conf,
1925                        struct rte_mempool *mp)
1926 {
1927         int ret;
1928         uint32_t mbp_buf_size;
1929         struct rte_eth_dev *dev;
1930         struct rte_eth_dev_info dev_info;
1931         struct rte_eth_rxconf local_conf;
1932         void **rxq;
1933
1934         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1935
1936         dev = &rte_eth_devices[port_id];
1937         if (rx_queue_id >= dev->data->nb_rx_queues) {
1938                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
1939                 return -EINVAL;
1940         }
1941
1942         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
1943
1944         ret = rte_eth_dev_info_get(port_id, &dev_info);
1945         if (ret != 0)
1946                 return ret;
1947
1948         if (mp != NULL) {
1949                 /* Single pool configuration check. */
1950                 if (rx_conf != NULL && rx_conf->rx_nseg != 0) {
1951                         RTE_ETHDEV_LOG(ERR,
1952                                        "Ambiguous segment configuration\n");
1953                         return -EINVAL;
1954                 }
1955                 /*
1956                  * Check the size of the mbuf data buffer; this value
1957                  * must be provided in the private data of the memory pool.
1958                  * First check that the memory pool has valid private data.
1959                  */
1960                 if (mp->private_data_size <
1961                                 sizeof(struct rte_pktmbuf_pool_private)) {
1962                         RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n",
1963                                 mp->name, mp->private_data_size,
1964                                 (unsigned int)
1965                                 sizeof(struct rte_pktmbuf_pool_private));
1966                         return -ENOSPC;
1967                 }
1968                 mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1969                 if (mbp_buf_size < dev_info.min_rx_bufsize +
1970                                    RTE_PKTMBUF_HEADROOM) {
1971                         RTE_ETHDEV_LOG(ERR,
1972                                        "%s mbuf_data_room_size %u < %u (RTE_PKTMBUF_HEADROOM=%u + min_rx_bufsize(dev)=%u)\n",
1973                                        mp->name, mbp_buf_size,
1974                                        RTE_PKTMBUF_HEADROOM +
1975                                        dev_info.min_rx_bufsize,
1976                                        RTE_PKTMBUF_HEADROOM,
1977                                        dev_info.min_rx_bufsize);
1978                         return -EINVAL;
1979                 }
1980         } else {
1981                 const struct rte_eth_rxseg_split *rx_seg;
1982                 uint16_t n_seg;
1983
1984                 /* Extended multi-segment configuration check. */
1985                 if (rx_conf == NULL || rx_conf->rx_seg == NULL || rx_conf->rx_nseg == 0) {
1986                         RTE_ETHDEV_LOG(ERR, "Memory pool is null and no extended configuration provided\n");
1987                         return -EINVAL;
1988                 }
1989                 rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg;
1990                 n_seg = rx_conf->rx_nseg;
1991                 if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
1992                         ret = rte_eth_rx_queue_check_split(rx_seg, n_seg,
1993                                                            &mbp_buf_size,
1994                                                            &dev_info);
1995                         if (ret != 0)
1996                                 return ret;
1997                 } else {
1998                         RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n");
1999                         return -EINVAL;
2000                 }
2001         }
2002
2003         /* Use default specified by driver, if nb_rx_desc is zero */
2004         if (nb_rx_desc == 0) {
2005                 nb_rx_desc = dev_info.default_rxportconf.ring_size;
2006                 /* If driver default is also zero, fall back on EAL default */
2007                 if (nb_rx_desc == 0)
2008                         nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
2009         }
2010
2011         if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
2012                         nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
2013                         nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
2015                 RTE_ETHDEV_LOG(ERR,
2016                         "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
2017                         nb_rx_desc, dev_info.rx_desc_lim.nb_max,
2018                         dev_info.rx_desc_lim.nb_min,
2019                         dev_info.rx_desc_lim.nb_align);
2020                 return -EINVAL;
2021         }
2022
2023         if (dev->data->dev_started &&
2024                 !(dev_info.dev_capa &
2025                         RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
2026                 return -EBUSY;
2027
2028         if (dev->data->dev_started &&
2029                 (dev->data->rx_queue_state[rx_queue_id] !=
2030                         RTE_ETH_QUEUE_STATE_STOPPED))
2031                 return -EBUSY;
2032
2033         rxq = dev->data->rx_queues;
2034         if (rxq[rx_queue_id]) {
2035                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
2036                                         -ENOTSUP);
2037                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
2038                 rxq[rx_queue_id] = NULL;
2039         }
2040
2041         if (rx_conf == NULL)
2042                 rx_conf = &dev_info.default_rxconf;
2043
2044         local_conf = *rx_conf;
2045
2046         /*
2047          * If an offload has already been enabled in
2048          * rte_eth_dev_configure(), it has been enabled on all queues,
2049          * so there is no need to enable it on this queue again.
2050          * The local_conf.offloads input to the underlying PMD only
2051          * carries those offloads enabled on this queue alone and
2052          * not on all queues.
2053          */
2054         local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
2055
2056         /*
2057          * Offloads newly added for this queue are those not enabled in
2058          * rte_eth_dev_configure(), and they must be of the per-queue type.
2059          * A pure per-port offload can't be enabled on one queue while
2060          * disabled on another. Nor can a pure per-port offload be newly
2061          * added on any queue unless it has already been enabled in
2062          * rte_eth_dev_configure().
2063          */
2064         if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
2065              local_conf.offloads) {
2066                 RTE_ETHDEV_LOG(ERR,
2067                         "Ethdev port_id=%d rx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
2068                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2069                         port_id, rx_queue_id, local_conf.offloads,
2070                         dev_info.rx_queue_offload_capa,
2071                         __func__);
2072                 return -EINVAL;
2073         }
2074
2075         /*
2076          * If LRO is enabled, check that the maximum aggregated packet
2077          * size is supported by the configured device.
2078          */
2079         if (local_conf.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
2080                 if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0)
2081                         dev->data->dev_conf.rxmode.max_lro_pkt_size =
2082                                 dev->data->dev_conf.rxmode.max_rx_pkt_len;
2083                 ret = eth_dev_check_lro_pkt_size(port_id,
2084                                 dev->data->dev_conf.rxmode.max_lro_pkt_size,
2085                                 dev->data->dev_conf.rxmode.max_rx_pkt_len,
2086                                 dev_info.max_lro_pkt_size);
2087                 if (ret != 0)
2088                         return ret;
2089         }
2090
2091         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
2092                                               socket_id, &local_conf, mp);
2093         if (!ret) {
2094                 if (!dev->data->min_rx_buf_size ||
2095                     dev->data->min_rx_buf_size > mbp_buf_size)
2096                         dev->data->min_rx_buf_size = mbp_buf_size;
2097         }
2098
2099         rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp,
2100                 rx_conf, ret);
2101         return eth_err(port_id, ret);
2102 }
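
/*
 * Sketch of the common single-pool path (illustrative; port_id assumed
 * in scope, pool and ring sizes are arbitrary example values):
 *
 *	struct rte_mempool *mp;
 *
 *	mp = rte_pktmbuf_pool_create("rx_pool", 8192, 256, 0,
 *			RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *	if (mp == NULL)
 *		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
 *	// NULL rx_conf selects dev_info.default_rxconf
 *	if (rte_eth_rx_queue_setup(port_id, 0, 1024,
 *			rte_eth_dev_socket_id(port_id), NULL, mp) != 0)
 *		rte_exit(EXIT_FAILURE, "Cannot set up Rx queue\n");
 */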
2103
2104 int
2105 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2106                                uint16_t nb_rx_desc,
2107                                const struct rte_eth_hairpin_conf *conf)
2108 {
2109         int ret;
2110         struct rte_eth_dev *dev;
2111         struct rte_eth_hairpin_cap cap;
2112         void **rxq;
2113         int i;
2114         int count;
2115
2116         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2117
2118         dev = &rte_eth_devices[port_id];
2119         if (rx_queue_id >= dev->data->nb_rx_queues) {
2120                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
2121                 return -EINVAL;
2122         }
2123         ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2124         if (ret != 0)
2125                 return ret;
2126         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_hairpin_queue_setup,
2127                                 -ENOTSUP);
2128         /* if nb_rx_desc is zero use max number of desc from the driver. */
2129         if (nb_rx_desc == 0)
2130                 nb_rx_desc = cap.max_nb_desc;
2131         if (nb_rx_desc > cap.max_nb_desc) {
2132                 RTE_ETHDEV_LOG(ERR,
2133                         "Invalid value for nb_rx_desc(=%hu), should be: <= %hu\n",
2134                         nb_rx_desc, cap.max_nb_desc);
2135                 return -EINVAL;
2136         }
2137         if (conf->peer_count > cap.max_rx_2_tx) {
2138                 RTE_ETHDEV_LOG(ERR,
2139                         "Invalid value for number of peers for Rx queue(=%u), should be: <= %hu\n",
2140                         conf->peer_count, cap.max_rx_2_tx);
2141                 return -EINVAL;
2142         }
2143         if (conf->peer_count == 0) {
2144                 RTE_ETHDEV_LOG(ERR,
2145                         "Invalid value for number of peers for Rx queue(=%u), should be: > 0\n",
2146                         conf->peer_count);
2147                 return -EINVAL;
2148         }
2149         for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
2150              cap.max_nb_queues != UINT16_MAX; i++) {
2151                 if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
2152                         count++;
2153         }
2154         if (count > cap.max_nb_queues) {
2155                 RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues, max is %d\n",
2156                                cap.max_nb_queues);
2157                 return -EINVAL;
2158         }
2159         if (dev->data->dev_started)
2160                 return -EBUSY;
2161         rxq = dev->data->rx_queues;
2162         if (rxq[rx_queue_id] != NULL) {
2163                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
2164                                         -ENOTSUP);
2165                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
2166                 rxq[rx_queue_id] = NULL;
2167         }
2168         ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
2169                                                       nb_rx_desc, conf);
2170         if (ret == 0)
2171                 dev->data->rx_queue_state[rx_queue_id] =
2172                         RTE_ETH_QUEUE_STATE_HAIRPIN;
2173         return eth_err(port_id, ret);
2174 }
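
/*
 * Illustrative sketch of a single-port hairpin setup. This assumes the
 * rte_eth_hairpin_conf layout of this release (peer_count plus a
 * peers[] array as declared in rte_ethdev.h); rx_hairpin_qid and
 * tx_hairpin_qid are hypothetical queue indexes:
 *
 *	struct rte_eth_hairpin_conf hairpin_conf;
 *
 *	memset(&hairpin_conf, 0, sizeof(hairpin_conf));
 *	hairpin_conf.peer_count = 1;
 *	hairpin_conf.peers[0].port = port_id;
 *	hairpin_conf.peers[0].queue = tx_hairpin_qid;
 *	// nb_rx_desc of 0 means "use the PMD maximum"
 *	ret = rte_eth_rx_hairpin_queue_setup(port_id, rx_hairpin_qid,
 *					     0, &hairpin_conf);
 */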
2175
2176 int
2177 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2178                        uint16_t nb_tx_desc, unsigned int socket_id,
2179                        const struct rte_eth_txconf *tx_conf)
2180 {
2181         struct rte_eth_dev *dev;
2182         struct rte_eth_dev_info dev_info;
2183         struct rte_eth_txconf local_conf;
2184         void **txq;
2185         int ret;
2186
2187         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2188
2189         dev = &rte_eth_devices[port_id];
2190         if (tx_queue_id >= dev->data->nb_tx_queues) {
2191                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
2192                 return -EINVAL;
2193         }
2194
2195         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
2196
2197         ret = rte_eth_dev_info_get(port_id, &dev_info);
2198         if (ret != 0)
2199                 return ret;
2200
2201         /* Use default specified by driver, if nb_tx_desc is zero */
2202         if (nb_tx_desc == 0) {
2203                 nb_tx_desc = dev_info.default_txportconf.ring_size;
2204                 /* If driver default is zero, fall back on EAL default */
2205                 if (nb_tx_desc == 0)
2206                         nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
2207         }
2208         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
2209             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
2210             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
2211                 RTE_ETHDEV_LOG(ERR,
2212                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
2213                         nb_tx_desc, dev_info.tx_desc_lim.nb_max,
2214                         dev_info.tx_desc_lim.nb_min,
2215                         dev_info.tx_desc_lim.nb_align);
2216                 return -EINVAL;
2217         }
2218
2219         if (dev->data->dev_started &&
2220                 !(dev_info.dev_capa &
2221                         RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
2222                 return -EBUSY;
2223
2224         if (dev->data->dev_started &&
2225                 (dev->data->tx_queue_state[tx_queue_id] !=
2226                         RTE_ETH_QUEUE_STATE_STOPPED))
2227                 return -EBUSY;
2228
2229         txq = dev->data->tx_queues;
2230         if (txq[tx_queue_id]) {
2231                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
2232                                         -ENOTSUP);
2233                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
2234                 txq[tx_queue_id] = NULL;
2235         }
2236
2237         if (tx_conf == NULL)
2238                 tx_conf = &dev_info.default_txconf;
2239
2240         local_conf = *tx_conf;
2241
2242         /*
2243          * If an offload has already been enabled in
2244          * rte_eth_dev_configure(), it has been enabled on all queues,
2245          * so there is no need to enable it on this queue again.
2246          * The local_conf.offloads input to the underlying PMD only
2247          * carries those offloads enabled on this queue alone and
2248          * not on all queues.
2249          */
2250         local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
2251
2252         /*
2253          * Offloads newly added for this queue are those not enabled in
2254          * rte_eth_dev_configure(), and they must be of the per-queue type.
2255          * A pure per-port offload can't be enabled on one queue while
2256          * disabled on another. Nor can a pure per-port offload be newly
2257          * added on any queue unless it has already been enabled in
2258          * rte_eth_dev_configure().
2259          */
2260         if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
2261              local_conf.offloads) {
2262                 RTE_ETHDEV_LOG(ERR,
2263                         "Ethdev port_id=%d tx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
2264                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2265                         port_id, tx_queue_id, local_conf.offloads,
2266                         dev_info.tx_queue_offload_capa,
2267                         __func__);
2268                 return -EINVAL;
2269         }
2270
2271         rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf);
2272         return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
2273                        tx_queue_id, nb_tx_desc, socket_id, &local_conf));
2274 }
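
/*
 * Usage sketch (illustrative): a Tx queue that adds one queue-local
 * offload on top of the port-level configuration. Assumes dev_info was
 * filled by rte_eth_dev_info_get() and that the chosen offload appears
 * in dev_info.tx_queue_offload_capa:
 *
 *	struct rte_eth_txconf txconf = dev_info.default_txconf;
 *
 *	txconf.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
 *	if (rte_eth_tx_queue_setup(port_id, 0, 1024,
 *			rte_eth_dev_socket_id(port_id), &txconf) != 0)
 *		rte_exit(EXIT_FAILURE, "Cannot set up Tx queue\n");
 */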
2275
2276 int
2277 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2278                                uint16_t nb_tx_desc,
2279                                const struct rte_eth_hairpin_conf *conf)
2280 {
2281         struct rte_eth_dev *dev;
2282         struct rte_eth_hairpin_cap cap;
2283         void **txq;
2284         int i;
2285         int count;
2286         int ret;
2287
2288         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2289         dev = &rte_eth_devices[port_id];
2290         if (tx_queue_id >= dev->data->nb_tx_queues) {
2291                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
2292                 return -EINVAL;
2293         }
2294         ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2295         if (ret != 0)
2296                 return ret;
2297         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup,
2298                                 -ENOTSUP);
2299         /* if nb_tx_desc is zero use max number of desc from the driver. */
2300         if (nb_tx_desc == 0)
2301                 nb_tx_desc = cap.max_nb_desc;
2302         if (nb_tx_desc > cap.max_nb_desc) {
2303                 RTE_ETHDEV_LOG(ERR,
2304                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu\n",
2305                         nb_tx_desc, cap.max_nb_desc);
2306                 return -EINVAL;
2307         }
2308         if (conf->peer_count > cap.max_tx_2_rx) {
2309                 RTE_ETHDEV_LOG(ERR,
2310                         "Invalid value for number of peers for Tx queue(=%u), should be: <= %hu\n",
2311                         conf->peer_count, cap.max_tx_2_rx);
2312                 return -EINVAL;
2313         }
2314         if (conf->peer_count == 0) {
2315                 RTE_ETHDEV_LOG(ERR,
2316                         "Invalid value for number of peers for Tx queue(=%u), should be: > 0\n",
2317                         conf->peer_count);
2318                 return -EINVAL;
2319         }
2320         for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
2321              cap.max_nb_queues != UINT16_MAX; i++) {
2322                 if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
2323                         count++;
2324         }
2325         if (count > cap.max_nb_queues) {
2326                 RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues, max is %d\n",
2327                                cap.max_nb_queues);
2328                 return -EINVAL;
2329         }
2330         if (dev->data->dev_started)
2331                 return -EBUSY;
2332         txq = dev->data->tx_queues;
2333         if (txq[tx_queue_id] != NULL) {
2334                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
2335                                         -ENOTSUP);
2336                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
2337                 txq[tx_queue_id] = NULL;
2338         }
2339         ret = (*dev->dev_ops->tx_hairpin_queue_setup)
2340                 (dev, tx_queue_id, nb_tx_desc, conf);
2341         if (ret == 0)
2342                 dev->data->tx_queue_state[tx_queue_id] =
2343                         RTE_ETH_QUEUE_STATE_HAIRPIN;
2344         return eth_err(port_id, ret);
2345 }
2346
2347 int
2348 rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
2349 {
2350         struct rte_eth_dev *dev;
2351         int ret;
2352
2353         RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2354         dev = &rte_eth_devices[tx_port];
2355         if (dev->data->dev_started == 0) {
2356                 RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port);
2357                 return -EBUSY;
2358         }
2359
2360         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_bind, -ENOTSUP);
2361         ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port);
2362         if (ret != 0)
2363                 RTE_ETHDEV_LOG(ERR, "Failed to bind hairpin Tx %d"
2364                                " to Rx %d (%d - all ports)\n",
2365                                tx_port, rx_port, RTE_MAX_ETHPORTS);
2366
2367         return ret;
2368 }
2369
2370 int
2371 rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
2372 {
2373         struct rte_eth_dev *dev;
2374         int ret;
2375
2376         RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2377         dev = &rte_eth_devices[tx_port];
2378         if (dev->data->dev_started == 0) {
2379                 RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port);
2380                 return -EBUSY;
2381         }
2382
2383         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_unbind, -ENOTSUP);
2384         ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port);
2385         if (ret != 0)
2386                 RTE_ETHDEV_LOG(ERR, "Failed to unbind hairpin Tx %d"
2387                                " from Rx %d (%d - all ports)\n",
2388                                tx_port, rx_port, RTE_MAX_ETHPORTS);
2389
2390         return ret;
2391 }
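
/*
 * Usage sketch (illustrative): for hairpin queues created with manual
 * binding, the application wires the two started ports together and
 * later tears the binding down; tx_port and rx_port assumed in scope.
 *
 *	ret = rte_eth_hairpin_bind(tx_port, rx_port);
 *	if (ret != 0 && ret != -ENOTSUP)
 *		RTE_LOG(ERR, USER1, "bind: %s\n", rte_strerror(-ret));
 *	// ... traffic flows through the hairpin path ...
 *	ret = rte_eth_hairpin_unbind(tx_port, rx_port);
 */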
2392
2393 int
2394 rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
2395                                size_t len, uint32_t direction)
2396 {
2397         struct rte_eth_dev *dev;
2398         int ret;
2399
2400         if (peer_ports == NULL || len == 0)
2401                 return -EINVAL;
2402
2403         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2404         dev = &rte_eth_devices[port_id];
2405         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_get_peer_ports,
2406                                 -ENOTSUP);
2407
2408         ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports,
2409                                                       len, direction);
2410         if (ret < 0)
2411                 RTE_ETHDEV_LOG(ERR, "Failed to get hairpin peer %s ports of port %d\n",
2412                                direction ? "Rx" : "Tx", port_id);
2413
2414         return ret;
2415 }
2416
2417 void
2418 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
2419                 void *userdata __rte_unused)
2420 {
2421         rte_pktmbuf_free_bulk(pkts, unsent);
2422 }
2423
2424 void
2425 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
2426                 void *userdata)
2427 {
2428         uint64_t *count = userdata;
2429
2430         rte_pktmbuf_free_bulk(pkts, unsent);
2431         *count += unsent;
2432 }
2433
2434 int
2435 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
2436                 buffer_tx_error_fn cbfn, void *userdata)
2437 {
2438         buffer->error_callback = cbfn;
2439         buffer->error_userdata = userdata;
2440         return 0;
2441 }
2442
2443 int
2444 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
2445 {
2446         int ret = 0;
2447
2448         if (buffer == NULL)
2449                 return -EINVAL;
2450
2451         buffer->size = size;
2452         if (buffer->error_callback == NULL) {
2453                 ret = rte_eth_tx_buffer_set_err_callback(
2454                         buffer, rte_eth_tx_buffer_drop_callback, NULL);
2455         }
2456
2457         return ret;
2458 }
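
/*
 * Usage sketch (illustrative): allocate a Tx buffer for bursts of up
 * to 32 packets, count drops instead of silently freeing them, and
 * flush at the end of each poll loop. port_id and the mbuf pointer
 * pkt are assumed in scope.
 *
 *	struct rte_eth_dev_tx_buffer *buffer;
 *	uint64_t dropped = 0;
 *
 *	buffer = rte_zmalloc_socket("tx_buffer",
 *			RTE_ETH_TX_BUFFER_SIZE(32), 0,
 *			rte_eth_dev_socket_id(port_id));
 *	if (buffer == NULL || rte_eth_tx_buffer_init(buffer, 32) != 0)
 *		rte_exit(EXIT_FAILURE, "Cannot init Tx buffer\n");
 *	rte_eth_tx_buffer_set_err_callback(buffer,
 *			rte_eth_tx_buffer_count_callback, &dropped);
 *	...
 *	rte_eth_tx_buffer(port_id, 0, buffer, pkt);
 *	rte_eth_tx_buffer_flush(port_id, 0, buffer);
 */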
2459
2460 int
2461 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
2462 {
2463         struct rte_eth_dev *dev;
2464         int ret;
2465
2466         /* Validate input data. Bail out if not valid or not supported. */
2467         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2468         dev = &rte_eth_devices[port_id];
2469         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
2470         if (queue_id >= dev->data->nb_tx_queues) {
2471                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
2472                 return -EINVAL;
2473         }
2474
2475         /* Call the driver to free pending mbufs. */
2476         ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
2477                                                free_cnt);
2473         return eth_err(port_id, ret);
2474 }
2475
2476 int
2477 rte_eth_promiscuous_enable(uint16_t port_id)
2478 {
2479         struct rte_eth_dev *dev;
2480         int diag = 0;
2481
2482         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2483         dev = &rte_eth_devices[port_id];
2484
2485         if (dev->data->promiscuous == 1)
2486                 return 0;
2487
2488         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP);
2489
2490         diag = (*dev->dev_ops->promiscuous_enable)(dev);
2491         dev->data->promiscuous = (diag == 0) ? 1 : 0;
2492
2493         return eth_err(port_id, diag);
2494 }
2495
2496 int
2497 rte_eth_promiscuous_disable(uint16_t port_id)
2498 {
2499         struct rte_eth_dev *dev;
2500         int diag = 0;
2501
2502         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2503         dev = &rte_eth_devices[port_id];
2504
2505         if (dev->data->promiscuous == 0)
2506                 return 0;
2507
2508         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP);
2509
2510         dev->data->promiscuous = 0;
2511         diag = (*dev->dev_ops->promiscuous_disable)(dev);
2512         if (diag != 0)
2513                 dev->data->promiscuous = 1;
2514
2515         return eth_err(port_id, diag);
2516 }
2517
2518 int
2519 rte_eth_promiscuous_get(uint16_t port_id)
2520 {
2521         struct rte_eth_dev *dev;
2522
2523         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2524
2525         dev = &rte_eth_devices[port_id];
2526         return dev->data->promiscuous;
2527 }
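
/*
 * Usage sketch (illustrative): enable promiscuous mode, tolerating
 * PMDs that do not implement it; port_id assumed in scope.
 *
 *	int ret = rte_eth_promiscuous_enable(port_id);
 *
 *	if (ret != 0 && ret != -ENOTSUP)
 *		RTE_LOG(ERR, USER1, "promiscuous enable on port %u: %s\n",
 *			port_id, rte_strerror(-ret));
 */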
2528
2529 int
2530 rte_eth_allmulticast_enable(uint16_t port_id)
2531 {
2532         struct rte_eth_dev *dev;
2533         int diag;
2534
2535         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2536         dev = &rte_eth_devices[port_id];
2537
2538         if (dev->data->all_multicast == 1)
2539                 return 0;
2540
2541         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP);
2542         diag = (*dev->dev_ops->allmulticast_enable)(dev);
2543         dev->data->all_multicast = (diag == 0) ? 1 : 0;
2544
2545         return eth_err(port_id, diag);
2546 }
2547
2548 int
2549 rte_eth_allmulticast_disable(uint16_t port_id)
2550 {
2551         struct rte_eth_dev *dev;
2552         int diag;
2553
2554         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2555         dev = &rte_eth_devices[port_id];
2556
2557         if (dev->data->all_multicast == 0)
2558                 return 0;
2559
2560         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP);
2561         dev->data->all_multicast = 0;
2562         diag = (*dev->dev_ops->allmulticast_disable)(dev);
2563         if (diag != 0)
2564                 dev->data->all_multicast = 1;
2565
2566         return eth_err(port_id, diag);
2567 }
2568
2569 int
2570 rte_eth_allmulticast_get(uint16_t port_id)
2571 {
2572         struct rte_eth_dev *dev;
2573
2574         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2575
2576         dev = &rte_eth_devices[port_id];
2577         return dev->data->all_multicast;
2578 }
2579
2580 int
2581 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
2582 {
2583         struct rte_eth_dev *dev;
2584
2585         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2586         dev = &rte_eth_devices[port_id];
2587
2588         if (dev->data->dev_conf.intr_conf.lsc &&
2589             dev->data->dev_started)
2590                 rte_eth_linkstatus_get(dev, eth_link);
2591         else {
2592                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2593                 (*dev->dev_ops->link_update)(dev, 1);
2594                 *eth_link = dev->data->dev_link;
2595         }
2596
2597         return 0;
2598 }
2599
2600 int
2601 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
2602 {
2603         struct rte_eth_dev *dev;
2604
2605         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2606         dev = &rte_eth_devices[port_id];
2607
2608         if (dev->data->dev_conf.intr_conf.lsc &&
2609             dev->data->dev_started)
2610                 rte_eth_linkstatus_get(dev, eth_link);
2611         else {
2612                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2613                 (*dev->dev_ops->link_update)(dev, 0);
2614                 *eth_link = dev->data->dev_link;
2615         }
2616
2617         return 0;
2618 }
2619
2620 const char *
2621 rte_eth_link_speed_to_str(uint32_t link_speed)
2622 {
2623         switch (link_speed) {
2624         case ETH_SPEED_NUM_NONE: return "None";
2625         case ETH_SPEED_NUM_10M:  return "10 Mbps";
2626         case ETH_SPEED_NUM_100M: return "100 Mbps";
2627         case ETH_SPEED_NUM_1G:   return "1 Gbps";
2628         case ETH_SPEED_NUM_2_5G: return "2.5 Gbps";
2629         case ETH_SPEED_NUM_5G:   return "5 Gbps";
2630         case ETH_SPEED_NUM_10G:  return "10 Gbps";
2631         case ETH_SPEED_NUM_20G:  return "20 Gbps";
2632         case ETH_SPEED_NUM_25G:  return "25 Gbps";
2633         case ETH_SPEED_NUM_40G:  return "40 Gbps";
2634         case ETH_SPEED_NUM_50G:  return "50 Gbps";
2635         case ETH_SPEED_NUM_56G:  return "56 Gbps";
2636         case ETH_SPEED_NUM_100G: return "100 Gbps";
2637         case ETH_SPEED_NUM_200G: return "200 Gbps";
2638         case ETH_SPEED_NUM_UNKNOWN: return "Unknown";
2639         default: return "Invalid";
2640         }
2641 }
2642
2643 int
2644 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
2645 {
2646         if (eth_link->link_status == ETH_LINK_DOWN)
2647                 return snprintf(str, len, "Link down");
2648         else
2649                 return snprintf(str, len, "Link up at %s %s %s",
2650                         rte_eth_link_speed_to_str(eth_link->link_speed),
2651                         (eth_link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
2652                         "FDX" : "HDX",
2653                         (eth_link->link_autoneg == ETH_LINK_AUTONEG) ?
2654                         "Autoneg" : "Fixed");
2655 }
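
/*
 * Usage sketch (illustrative): format the current link status without
 * waiting for link resolution; port_id assumed in scope.
 *
 *	struct rte_eth_link link;
 *	char link_str[RTE_ETH_LINK_MAX_STR_LEN];
 *
 *	if (rte_eth_link_get_nowait(port_id, &link) == 0) {
 *		rte_eth_link_to_str(link_str, sizeof(link_str), &link);
 *		printf("Port %u: %s\n", port_id, link_str);
 *	}
 */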
2656
2657 int
2658 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
2659 {
2660         struct rte_eth_dev *dev;
2661
2662         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2663
2664         dev = &rte_eth_devices[port_id];
2665         memset(stats, 0, sizeof(*stats));
2666
2667         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
2668         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
2669         return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
2670 }
2671
2672 int
2673 rte_eth_stats_reset(uint16_t port_id)
2674 {
2675         struct rte_eth_dev *dev;
2676         int ret;
2677
2678         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2679         dev = &rte_eth_devices[port_id];
2680
2681         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
2682         ret = (*dev->dev_ops->stats_reset)(dev);
2683         if (ret != 0)
2684                 return eth_err(port_id, ret);
2685
2686         dev->data->rx_mbuf_alloc_failed = 0;
2687
2688         return 0;
2689 }
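
/*
 * Usage sketch (illustrative): poll the basic stats periodically and
 * reset them so each read reports a per-interval delta.
 *
 *	struct rte_eth_stats stats;
 *
 *	if (rte_eth_stats_get(port_id, &stats) == 0)
 *		printf("rx=%"PRIu64" tx=%"PRIu64" missed=%"PRIu64
 *		       " no_mbuf=%"PRIu64"\n", stats.ipackets,
 *		       stats.opackets, stats.imissed, stats.rx_nombuf);
 *	rte_eth_stats_reset(port_id);
 */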
2690
2691 static inline int
2692 eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev)
2693 {
2694         uint16_t nb_rxqs, nb_txqs;
2695         int count;
2696
2697         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2698         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2699
2700         count = RTE_NB_STATS;
2701         if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) {
2702                 count += nb_rxqs * RTE_NB_RXQ_STATS;
2703                 count += nb_txqs * RTE_NB_TXQ_STATS;
2704         }
2705
2706         return count;
2707 }
2708
2709 static int
2710 eth_dev_get_xstats_count(uint16_t port_id)
2711 {
2712         struct rte_eth_dev *dev;
2713         int count;
2714
2715         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2716         dev = &rte_eth_devices[port_id];
2717         if (dev->dev_ops->xstats_get_names_by_id != NULL) {
2718                 count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
2719                                 NULL, 0);
2720                 if (count < 0)
2721                         return eth_err(port_id, count);
2722         }
2723         if (dev->dev_ops->xstats_get_names != NULL) {
2724                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
2725                 if (count < 0)
2726                         return eth_err(port_id, count);
2727         } else
2728                 count = 0;
2729
2731         count += eth_dev_get_xstats_basic_count(dev);
2732
2733         return count;
2734 }
2735
2736 int
2737 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
2738                 uint64_t *id)
2739 {
2740         int cnt_xstats, idx_xstat;
2741
2742         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2743
2744         if (id == NULL) {
2745                 RTE_ETHDEV_LOG(ERR, "Id pointer is NULL\n");
2746                 return -EINVAL;
2747         }
2748
2749         if (xstat_name == NULL) {
2750                 RTE_ETHDEV_LOG(ERR, "xstat_name pointer is NULL\n");
2751                 return -EINVAL;
2752         }
2753
2754         /* Get count */
2755         cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
2756         if (cnt_xstats < 0) {
2757                 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n");
2758                 return -ENODEV;
2759         }
2760
2761         /* Get id-name lookup table */
2762         struct rte_eth_xstat_name xstats_names[cnt_xstats];
2763
2764         if (cnt_xstats != rte_eth_xstats_get_names_by_id(
2765                         port_id, xstats_names, cnt_xstats, NULL)) {
2766                 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n");
2767                 return -1;
2768         }
2769
2770         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
2771                 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
2772                         *id = idx_xstat;
2773                         return 0;
2774                 }
2775         }
2776
2777         return -EINVAL;
2778 }
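
/*
 * Usage sketch (illustrative): resolve one xstat by name, then read it
 * by id; "rx_good_packets" is one of the basic stats registered in
 * this file.
 *
 *	uint64_t id, value;
 *
 *	if (rte_eth_xstats_get_id_by_name(port_id, "rx_good_packets",
 *					  &id) == 0 &&
 *	    rte_eth_xstats_get_by_id(port_id, &id, &value, 1) == 1)
 *		printf("rx_good_packets: %"PRIu64"\n", value);
 */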
2779
2780 /* retrieve basic stats names */
2781 static int
2782 eth_basic_stats_get_names(struct rte_eth_dev *dev,
2783         struct rte_eth_xstat_name *xstats_names)
2784 {
2785         int cnt_used_entries = 0;
2786         uint32_t idx, id_queue;
2787         uint16_t num_q;
2788
2789         for (idx = 0; idx < RTE_NB_STATS; idx++) {
2790                 strlcpy(xstats_names[cnt_used_entries].name,
2791                         eth_dev_stats_strings[idx].name,
2792                         sizeof(xstats_names[0].name));
2793                 cnt_used_entries++;
2794         }
2795
2796         if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
2797                 return cnt_used_entries;
2798
2799         num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2800         for (id_queue = 0; id_queue < num_q; id_queue++) {
2801                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
2802                         snprintf(xstats_names[cnt_used_entries].name,
2803                                 sizeof(xstats_names[0].name),
2804                                 "rx_q%u_%s",
2805                                 id_queue, eth_dev_rxq_stats_strings[idx].name);
2806                         cnt_used_entries++;
2807                 }
2808
2809         }
2810         num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2811         for (id_queue = 0; id_queue < num_q; id_queue++) {
2812                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
2813                         snprintf(xstats_names[cnt_used_entries].name,
2814                                 sizeof(xstats_names[0].name),
2815                                 "tx_q%u_%s",
2816                                 id_queue, eth_dev_txq_stats_strings[idx].name);
2817                         cnt_used_entries++;
2818                 }
2819         }
2820         return cnt_used_entries;
2821 }
2822
2823 /* retrieve ethdev extended statistics names */
2824 int
2825 rte_eth_xstats_get_names_by_id(uint16_t port_id,
2826         struct rte_eth_xstat_name *xstats_names, unsigned int size,
2827         uint64_t *ids)
2828 {
2829         struct rte_eth_xstat_name *xstats_names_copy;
2830         unsigned int no_basic_stat_requested = 1;
2831         unsigned int no_ext_stat_requested = 1;
2832         unsigned int expected_entries;
2833         unsigned int basic_count;
2834         struct rte_eth_dev *dev;
2835         unsigned int i;
2836         int ret;
2837
2838         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2839         dev = &rte_eth_devices[port_id];
2840
2841         basic_count = eth_dev_get_xstats_basic_count(dev);
2842         ret = eth_dev_get_xstats_count(port_id);
2843         if (ret < 0)
2844                 return ret;
2845         expected_entries = (unsigned int)ret;
2846
2847         /* Return max number of stats if no ids given */
2848         if (ids == NULL) {
2849                 if (xstats_names == NULL || size < expected_entries)
2850                         return expected_entries;
2851         }
2854
2855         if (ids && !xstats_names)
2856                 return -EINVAL;
2857
2858         if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
2859                 uint64_t ids_copy[size];
2860
2861                 for (i = 0; i < size; i++) {
2862                         if (ids[i] < basic_count) {
2863                                 no_basic_stat_requested = 0;
2864                                 break;
2865                         }
2866
2867                         /*
2868                          * Convert ids to xstats ids that PMD knows.
2869                          * ids known by user are basic + extended stats.
2870                          */
2871                         ids_copy[i] = ids[i] - basic_count;
2872                 }
2873
2874                 if (no_basic_stat_requested)
2875                         return (*dev->dev_ops->xstats_get_names_by_id)(dev,
2876                                         xstats_names, ids_copy, size);
2877         }
2878
2879         /* Retrieve all stats */
2880         if (!ids) {
2881                 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
2882                                 expected_entries);
2883                 if (num_stats < 0 || num_stats > (int)expected_entries)
2884                         return num_stats;
2885                 else
2886                         return expected_entries;
2887         }
2888
2889         xstats_names_copy = calloc(expected_entries,
2890                 sizeof(struct rte_eth_xstat_name));
2891
2892         if (!xstats_names_copy) {
2893                 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n");
2894                 return -ENOMEM;
2895         }
2896
2897         if (ids) {
2898                 for (i = 0; i < size; i++) {
2899                         if (ids[i] >= basic_count) {
2900                                 no_ext_stat_requested = 0;
2901                                 break;
2902                         }
2903                 }
2904         }
2905
2906         /* Fill xstats_names_copy structure */
2907         if (ids && no_ext_stat_requested) {
2908                 eth_basic_stats_get_names(dev, xstats_names_copy);
2909         } else {
2910                 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
2911                         expected_entries);
2912                 if (ret < 0) {
2913                         free(xstats_names_copy);
2914                         return ret;
2915                 }
2916         }
2917
2918         /* Filter stats */
2919         for (i = 0; i < size; i++) {
2920                 if (ids[i] >= expected_entries) {
2921                         RTE_ETHDEV_LOG(ERR, "Id value %" PRIu64 " is out of range\n", ids[i]);
2922                         free(xstats_names_copy);
2923                         return -1;
2924                 }
2925                 xstats_names[i] = xstats_names_copy[ids[i]];
2926         }
2927
2928         free(xstats_names_copy);
2929         return size;
2930 }
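
/*
 * Illustrative usage sketch, not part of the library: probe the total with a
 * NULL id array first, then fetch selected names by id. The two ids below are
 * arbitrary and assume the port exposes at least six xstats; "port_id" is
 * assumed to be a valid, configured port.
 *
 *	uint64_t ids[] = {0, 5};
 *	struct rte_eth_xstat_name names[RTE_DIM(ids)];
 *
 *	if (rte_eth_xstats_get_names_by_id(port_id, names,
 *			RTE_DIM(ids), ids) == (int)RTE_DIM(ids)) {
 *		// names[i] holds the name of xstat ids[i]
 *	}
 */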
2931
2932 int
2933 rte_eth_xstats_get_names(uint16_t port_id,
2934         struct rte_eth_xstat_name *xstats_names,
2935         unsigned int size)
2936 {
2937         struct rte_eth_dev *dev;
2938         int cnt_used_entries;
2939         int cnt_expected_entries;
2940         int cnt_driver_entries;
2941
2942         cnt_expected_entries = eth_dev_get_xstats_count(port_id);
2943         if (xstats_names == NULL || cnt_expected_entries < 0 ||
2944                         (int)size < cnt_expected_entries)
2945                 return cnt_expected_entries;
2946
2947         /* port_id checked in eth_dev_get_xstats_count() */
2948         dev = &rte_eth_devices[port_id];
2949
2950         cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names);
2951
2952         if (dev->dev_ops->xstats_get_names != NULL) {
2953                 /* If there are any driver-specific xstats, append them
2954                  * to end of list.
2955                  */
2956                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
2957                         dev,
2958                         xstats_names + cnt_used_entries,
2959                         size - cnt_used_entries);
2960                 if (cnt_driver_entries < 0)
2961                         return eth_err(port_id, cnt_driver_entries);
2962                 cnt_used_entries += cnt_driver_entries;
2963         }
2964
2965         return cnt_used_entries;
2966 }
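
/*
 * Illustrative sizing pattern, a sketch rather than library code: a NULL
 * array (or one that is too small) makes the call return the required count,
 * so the caller can allocate exactly enough entries and call again.
 *
 *	int n = rte_eth_xstats_get_names(port_id, NULL, 0);
 *
 *	if (n > 0) {
 *		struct rte_eth_xstat_name *names =
 *			malloc(n * sizeof(*names));
 *
 *		if (names != NULL &&
 *		    rte_eth_xstats_get_names(port_id, names, n) == n) {
 *			// names[0..n-1] are valid here
 *		}
 *		free(names);
 *	}
 */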
2967
2969 static int
2970 eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
2971 {
2972         struct rte_eth_dev *dev;
2973         struct rte_eth_stats eth_stats;
2974         unsigned int count = 0, i, q;
2975         uint64_t val, *stats_ptr;
2976         uint16_t nb_rxqs, nb_txqs;
2977         int ret;
2978
2979         ret = rte_eth_stats_get(port_id, &eth_stats);
2980         if (ret < 0)
2981                 return ret;
2982
2983         dev = &rte_eth_devices[port_id];
2984
2985         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2986         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2987
2988         /* global stats */
2989         for (i = 0; i < RTE_NB_STATS; i++) {
2990                 stats_ptr = RTE_PTR_ADD(&eth_stats,
2991                                         eth_dev_stats_strings[i].offset);
2992                 val = *stats_ptr;
2993                 xstats[count++].value = val;
2994         }
2995
2996         if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
2997                 return count;
2998
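        /*
         * The per-queue counters in struct rte_eth_stats (q_ipackets,
         * q_ibytes, q_errors, q_opackets, q_obytes) are fixed-size uint64_t
         * arrays of RTE_ETHDEV_QUEUE_STAT_CNTRS entries, so element q of a
         * counter lives at the counter's base offset plus
         * q * sizeof(uint64_t).
         */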
2999         /* per-rxq stats */
3000         for (q = 0; q < nb_rxqs; q++) {
3001                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
3002                         stats_ptr = RTE_PTR_ADD(&eth_stats,
3003                                         eth_dev_rxq_stats_strings[i].offset +
3004                                         q * sizeof(uint64_t));
3005                         val = *stats_ptr;
3006                         xstats[count++].value = val;
3007                 }
3008         }
3009
3010         /* per-txq stats */
3011         for (q = 0; q < nb_txqs; q++) {
3012                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
3013                         stats_ptr = RTE_PTR_ADD(&eth_stats,
3014                                         eth_dev_txq_stats_strings[i].offset +
3015                                         q * sizeof(uint64_t));
3016                         val = *stats_ptr;
3017                         xstats[count++].value = val;
3018                 }
3019         }
3020         return count;
3021 }
3022
3023 /* retrieve ethdev extended statistics */
3024 int
3025 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
3026                          uint64_t *values, unsigned int size)
3027 {
3028         unsigned int no_basic_stat_requested = 1;
3029         unsigned int no_ext_stat_requested = 1;
3030         unsigned int num_xstats_filled;
3031         unsigned int basic_count;
3032         uint16_t expected_entries;
3033         struct rte_eth_dev *dev;
3034         unsigned int i;
3035         int ret;
3036
3037         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3038         ret = eth_dev_get_xstats_count(port_id);
3039         if (ret < 0)
3040                 return ret;
3041         expected_entries = (uint16_t)ret;
3042         struct rte_eth_xstat xstats[expected_entries];
3043         dev = &rte_eth_devices[port_id];
3044         basic_count = eth_dev_get_xstats_basic_count(dev);
3045
3046         /* Return max number of stats if no ids given */
3047         if (!ids) {
3048                 if (values == NULL || size < expected_entries)
3049                         return expected_entries;
3052         }
3053
3054         if (ids && !values)
3055                 return -EINVAL;
3056
3057         if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
3059                 uint64_t ids_copy[size];
3060
3061                 for (i = 0; i < size; i++) {
3062                         if (ids[i] < basic_count) {
3063                                 no_basic_stat_requested = 0;
3064                                 break;
3065                         }
3066
3067                         /*
3068                          * Convert ids to xstats ids that PMD knows.
3069                          * ids known by user are basic + extended stats.
3070                          */
3071                         ids_copy[i] = ids[i] - basic_count;
3072                 }
3073
3074                 if (no_basic_stat_requested)
3075                         return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
3076                                         values, size);
3077         }
3078
3079         if (ids) {
3080                 for (i = 0; i < size; i++) {
3081                         if (ids[i] >= basic_count) {
3082                                 no_ext_stat_requested = 0;
3083                                 break;
3084                         }
3085                 }
3086         }
3087
3088         /* Fill the xstats structure */
3089         if (ids && no_ext_stat_requested)
3090                 ret = eth_basic_stats_get(port_id, xstats);
3091         else
3092                 ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
3093
3094         if (ret < 0)
3095                 return ret;
3096         num_xstats_filled = (unsigned int)ret;
3097
3098         /* Return all stats */
3099         if (!ids) {
3100                 for (i = 0; i < num_xstats_filled; i++)
3101                         values[i] = xstats[i].value;
3102                 return expected_entries;
3103         }
3104
3105         /* Filter stats */
3106         for (i = 0; i < size; i++) {
3107                 if (ids[i] >= expected_entries) {
3108                         RTE_ETHDEV_LOG(ERR, "Id value %" PRIu64 " is out of range\n", ids[i]);
3109                         return -1;
3110                 }
3111                 values[i] = xstats[ids[i]].value;
3112         }
3113         return size;
3114 }
3115
3116 int
3117 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
3118         unsigned int n)
3119 {
3120         struct rte_eth_dev *dev;
3121         unsigned int count = 0, i;
3122         signed int xcount = 0;
3123         uint16_t nb_rxqs, nb_txqs;
3124         int ret;
3125
3126         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3127
3128         dev = &rte_eth_devices[port_id];
3129
3130         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3131         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3132
3133         /* Return generic statistics */
3134         count = RTE_NB_STATS;
3135         if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS)
3136                 count += (nb_rxqs * RTE_NB_RXQ_STATS) + (nb_txqs * RTE_NB_TXQ_STATS);
3137
3138         /* implemented by the driver */
3139         if (dev->dev_ops->xstats_get != NULL) {
3140                 /* Retrieve the xstats from the driver at the end of the
3141                  * xstats struct.
3142                  */
3143                 xcount = (*dev->dev_ops->xstats_get)(dev,
3144                                      xstats ? xstats + count : NULL,
3145                                      (n > count) ? n - count : 0);
3146
3147                 if (xcount < 0)
3148                         return eth_err(port_id, xcount);
3149         }
3150
3151         if (n < count + xcount || xstats == NULL)
3152                 return count + xcount;
3153
3154         /* now fill the xstats structure */
3155         ret = eth_basic_stats_get(port_id, xstats);
3156         if (ret < 0)
3157                 return ret;
3158         count = ret;
3159
3160         for (i = 0; i < count; i++)
3161                 xstats[i].id = i;
3162         /* add an offset to driver-specific stats */
3163         for ( ; i < count + xcount; i++)
3164                 xstats[i].id += count;
3165
3166         return count + xcount;
3167 }
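
/*
 * Illustrative retrieval sketch, not part of the library, using the same
 * probe-then-fetch pattern as the name queries above:
 *
 *	int n = rte_eth_xstats_get(port_id, NULL, 0);
 *
 *	if (n > 0) {
 *		struct rte_eth_xstat *xs = malloc(n * sizeof(*xs));
 *
 *		if (xs != NULL && rte_eth_xstats_get(port_id, xs, n) == n) {
 *			// xs[i].id indexes the array returned by
 *			// rte_eth_xstats_get_names(); xs[i].value is the counter
 *		}
 *		free(xs);
 *	}
 */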
3168
3169 /* reset ethdev extended statistics */
3170 int
3171 rte_eth_xstats_reset(uint16_t port_id)
3172 {
3173         struct rte_eth_dev *dev;
3174
3175         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3176         dev = &rte_eth_devices[port_id];
3177
3178         /* implemented by the driver */
3179         if (dev->dev_ops->xstats_reset != NULL)
3180                 return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev));
3181
3182         /* fallback to default */
3183         return rte_eth_stats_reset(port_id);
3184 }
3185
3186 static int
3187 eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id,
3188                 uint8_t stat_idx, uint8_t is_rx)
3189 {
3190         struct rte_eth_dev *dev;
3191
3192         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3193
3194         dev = &rte_eth_devices[port_id];
3195
3196         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
3197
3198         if (is_rx && (queue_id >= dev->data->nb_rx_queues))
3199                 return -EINVAL;
3200
3201         if (!is_rx && (queue_id >= dev->data->nb_tx_queues))
3202                 return -EINVAL;
3203
3204         if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
3205                 return -EINVAL;
3206
3207         return (*dev->dev_ops->queue_stats_mapping_set)
3208                         (dev, queue_id, stat_idx, is_rx);
3209 }
3210
3212 int
3213 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
3214                 uint8_t stat_idx)
3215 {
3216         return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id,
3217                                                 tx_queue_id,
3218                                                 stat_idx, STAT_QMAP_TX));
3219 }
3220
3222 int
3223 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
3224                 uint8_t stat_idx)
3225 {
3226         return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id,
3227                                                 rx_queue_id,
3228                                                 stat_idx, STAT_QMAP_RX));
3229 }
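
/*
 * Illustrative mapping sketch, not library code: on NICs that implement
 * queue_stats_mapping_set this pins a Tx queue to per-queue counter 0, so
 * q_opackets[0]/q_obytes[0] in struct rte_eth_stats then reflect that queue.
 * The queue id 3 is arbitrary and assumes the port has at least four Tx
 * queues; -ENOTSUP is a normal outcome on other drivers.
 *
 *	int rc = rte_eth_dev_set_tx_queue_stats_mapping(port_id, 3, 0);
 */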
3230
3231 int
3232 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
3233 {
3234         struct rte_eth_dev *dev;
3235
3236         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3237         dev = &rte_eth_devices[port_id];
3238
3239         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
3240         return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
3241                                                         fw_version, fw_size));
3242 }
3243
3244 int
3245 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
3246 {
3247         struct rte_eth_dev *dev;
3248         const struct rte_eth_desc_lim lim = {
3249                 .nb_max = UINT16_MAX,
3250                 .nb_min = 0,
3251                 .nb_align = 1,
3252                 .nb_seg_max = UINT16_MAX,
3253                 .nb_mtu_seg_max = UINT16_MAX,
3254         };
3255         int diag;
3256
3257         /*
3258          * Init dev_info before the port_id check: callers that ignore the
3259          * return status cannot tell if the get succeeded, so zero it first.
3260          */
3261         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3262         dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
3263
3264         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3265         dev = &rte_eth_devices[port_id];
3266
3267         dev_info->rx_desc_lim = lim;
3268         dev_info->tx_desc_lim = lim;
3269         dev_info->device = dev->device;
3270         dev_info->min_mtu = RTE_ETHER_MIN_MTU;
3271         dev_info->max_mtu = UINT16_MAX;
3272
3273         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
3274         diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info);
3275         if (diag != 0) {
3276                 /* Cleanup already filled in device information */
3277                 memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3278                 return eth_err(port_id, diag);
3279         }
3280
3281         /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */
3282         dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues,
3283                         RTE_MAX_QUEUES_PER_PORT);
3284         dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues,
3285                         RTE_MAX_QUEUES_PER_PORT);
3286
3287         dev_info->driver_name = dev->device->driver->name;
3288         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
3289         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
3290
3291         dev_info->dev_flags = &dev->data->dev_flags;
3292
3293         return 0;
3294 }
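
/*
 * Illustrative sketch, not library code: dev_info is the usual way to clamp
 * requested queue counts before rte_eth_dev_configure(). "wanted_rx" and
 * "wanted_tx" are hypothetical application values.
 *
 *	struct rte_eth_dev_info info;
 *
 *	if (rte_eth_dev_info_get(port_id, &info) == 0) {
 *		uint16_t nb_rx = RTE_MIN(wanted_rx, info.max_rx_queues);
 *		uint16_t nb_tx = RTE_MIN(wanted_tx, info.max_tx_queues);
 *		// pass nb_rx/nb_tx to rte_eth_dev_configure()
 *	}
 */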
3295
3296 int
3297 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3298                                  uint32_t *ptypes, int num)
3299 {
3300         int i, j;
3301         struct rte_eth_dev *dev;
3302         const uint32_t *all_ptypes;
3303
3304         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3305         dev = &rte_eth_devices[port_id];
3306         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
3307         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3308
3309         if (!all_ptypes)
3310                 return 0;
3311
3312         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
3313                 if (all_ptypes[i] & ptype_mask) {
3314                         if (j < num)
3315                                 ptypes[j] = all_ptypes[i];
3316                         j++;
3317                 }
3318
3319         return j;
3320 }
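
/*
 * Illustrative probe sketch, not library code: the same two-call pattern as
 * the xstats queries, here restricted to L3 packet types.
 *
 *	int n = rte_eth_dev_get_supported_ptypes(port_id,
 *			RTE_PTYPE_L3_MASK, NULL, 0);
 *
 *	if (n > 0) {
 *		uint32_t *ptypes = malloc(n * sizeof(*ptypes));
 *
 *		if (ptypes != NULL)
 *			n = rte_eth_dev_get_supported_ptypes(port_id,
 *					RTE_PTYPE_L3_MASK, ptypes, n);
 *		free(ptypes);
 *	}
 */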
3321
3322 int
3323 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3324                                  uint32_t *set_ptypes, unsigned int num)
3325 {
3326         const uint32_t valid_ptype_masks[] = {
3327                 RTE_PTYPE_L2_MASK,
3328                 RTE_PTYPE_L3_MASK,
3329                 RTE_PTYPE_L4_MASK,
3330                 RTE_PTYPE_TUNNEL_MASK,
3331                 RTE_PTYPE_INNER_L2_MASK,
3332                 RTE_PTYPE_INNER_L3_MASK,
3333                 RTE_PTYPE_INNER_L4_MASK,
3334         };
3335         const uint32_t *all_ptypes;
3336         struct rte_eth_dev *dev;
3337         uint32_t unused_mask;
3338         unsigned int i, j;
3339         int ret;
3340
3341         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3342         dev = &rte_eth_devices[port_id];
3343
3344         if (num > 0 && set_ptypes == NULL)
3345                 return -EINVAL;
3346
3347         if (*dev->dev_ops->dev_supported_ptypes_get == NULL ||
3348                         *dev->dev_ops->dev_ptypes_set == NULL) {
3349                 ret = 0;
3350                 goto ptype_unknown;
3351         }
3352
3353         if (ptype_mask == 0) {
3354                 ret = (*dev->dev_ops->dev_ptypes_set)(dev,
3355                                 ptype_mask);
3356                 goto ptype_unknown;
3357         }
3358
3359         unused_mask = ptype_mask;
3360         for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) {
3361                 uint32_t mask = ptype_mask & valid_ptype_masks[i];
3362                 if (mask && mask != valid_ptype_masks[i]) {
3363                         ret = -EINVAL;
3364                         goto ptype_unknown;
3365                 }
3366                 unused_mask &= ~valid_ptype_masks[i];
3367         }
3368
3369         if (unused_mask) {
3370                 ret = -EINVAL;
3371                 goto ptype_unknown;
3372         }
3373
3374         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3375         if (all_ptypes == NULL) {
3376                 ret = 0;
3377                 goto ptype_unknown;
3378         }
3379
3380         /*
3381          * Accommodate as many set_ptypes as possible. If the supplied
3382          * set_ptypes array is insufficient fill it partially.
3383          * set_ptypes array is too small, fill it partially.
3384         for (i = 0, j = 0; set_ptypes != NULL &&
3385                                 (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) {
3386                 if (ptype_mask & all_ptypes[i]) {
3387                         if (j + 1 < num) {
3388                                 set_ptypes[j] = all_ptypes[i];
3389                                 j++;
3390                                 continue;
3391                         }
3392                         break;
3393                 }
3394         }
3395
3396         if (set_ptypes != NULL && j < num)
3397                 set_ptypes[j] = RTE_PTYPE_UNKNOWN;
3398
3399         return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask);
3400
3401 ptype_unknown:
3402         if (num > 0)
3403                 set_ptypes[0] = RTE_PTYPE_UNKNOWN;
3404
3405         return ret;
3406 }
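
/*
 * Illustrative sketch, not library code: tell the driver that only L3/L4
 * classification matters, so everything else may be reported as
 * RTE_PTYPE_UNKNOWN. With num == 0 and set_ptypes == NULL the mask is applied
 * without reporting back the resulting list.
 *
 *	int rc = rte_eth_dev_set_ptypes(port_id,
 *			RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK, NULL, 0);
 */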
3407
3408 int
3409 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
3410 {
3411         struct rte_eth_dev *dev;
3412
3413         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3414         dev = &rte_eth_devices[port_id];
3415         rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
3416
3417         return 0;
3418 }
3419
3420 int
3421 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
3422 {
3423         struct rte_eth_dev *dev;
3424
3425         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3426
3427         dev = &rte_eth_devices[port_id];
3428         *mtu = dev->data->mtu;
3429         return 0;
3430 }
3431
3432 int
3433 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
3434 {
3435         int ret;
3436         struct rte_eth_dev_info dev_info;
3437         struct rte_eth_dev *dev;
3438
3439         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3440         dev = &rte_eth_devices[port_id];
3441         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
3442
3443         /*
3444          * Check if the device supports dev_infos_get, if it does not
3445          * skip min_mtu/max_mtu validation here as this requires values
3446          * that are populated within the call to rte_eth_dev_info_get()
3447          * which relies on dev->dev_ops->dev_infos_get.
3448          */
3449         if (*dev->dev_ops->dev_infos_get != NULL) {
3450                 ret = rte_eth_dev_info_get(port_id, &dev_info);
3451                 if (ret != 0)
3452                         return ret;
3453
3454                 if (mtu < dev_info.min_mtu || mtu > dev_info.max_mtu)
3455                         return -EINVAL;
3456         }
3457
3458         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
3459         if (!ret)
3460                 dev->data->mtu = mtu;
3461
3462         return eth_err(port_id, ret);
3463 }
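
/*
 * Illustrative sketch, not library code: clamp a requested MTU to the device
 * limits before applying it. "wanted_mtu" is a hypothetical application
 * value.
 *
 *	struct rte_eth_dev_info info;
 *
 *	if (rte_eth_dev_info_get(port_id, &info) == 0) {
 *		uint16_t mtu = RTE_MAX(info.min_mtu,
 *				RTE_MIN(wanted_mtu, info.max_mtu));
 *
 *		(void)rte_eth_dev_set_mtu(port_id, mtu);
 *	}
 */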
3464
3465 int
3466 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
3467 {
3468         struct rte_eth_dev *dev;
3469         int ret;
3470
3471         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3472         dev = &rte_eth_devices[port_id];
3473         if (!(dev->data->dev_conf.rxmode.offloads &
3474               DEV_RX_OFFLOAD_VLAN_FILTER)) {
3475                 RTE_ETHDEV_LOG(ERR, "Port %u: vlan-filtering disabled\n",
3476                         port_id);
3477                 return -ENOSYS;
3478         }
3479
3480         if (vlan_id > 4095) {
3481                 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n",
3482                         port_id, vlan_id);
3483                 return -EINVAL;
3484         }
3485         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
3486
3487         ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
3488         if (ret == 0) {
3489                 struct rte_vlan_filter_conf *vfc;
3490                 int vidx;
3491                 int vbit;
3492
3493                 vfc = &dev->data->vlan_filter_conf;
3494                 vidx = vlan_id / 64;
3495                 vbit = vlan_id % 64;
3496
3497                 if (on)
3498                         vfc->ids[vidx] |= UINT64_C(1) << vbit;
3499                 else
3500                         vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
3501         }
3502
3503         return eth_err(port_id, ret);
3504 }
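
/*
 * Illustrative sketch, not library code: VLAN filtering must have been
 * enabled as an Rx offload (DEV_RX_OFFLOAD_VLAN_FILTER) at configure time
 * before individual ids can be added. The ids[] bitmap maintained above
 * stores one bit per VLAN id, 64 ids per uint64_t word.
 *
 *	// port configured with DEV_RX_OFFLOAD_VLAN_FILTER set:
 *	int rc = rte_eth_dev_vlan_filter(port_id, 100, 1); // accept VLAN 100
 */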
3505
3506 int
3507 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3508                                     int on)
3509 {
3510         struct rte_eth_dev *dev;
3511
3512         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3513         dev = &rte_eth_devices[port_id];
3514         if (rx_queue_id >= dev->data->nb_rx_queues) {
3515                 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id);
3516                 return -EINVAL;
3517         }
3518
3519         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
3520         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
3521
3522         return 0;
3523 }
3524
3525 int
3526 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
3527                                 enum rte_vlan_type vlan_type,
3528                                 uint16_t tpid)
3529 {
3530         struct rte_eth_dev *dev;
3531
3532         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3533         dev = &rte_eth_devices[port_id];
3534         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
3535
3536         return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
3537                                                                tpid));
3538 }
3539
3540 int
3541 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
3542 {
3543         struct rte_eth_dev_info dev_info;
3544         struct rte_eth_dev *dev;
3545         int ret = 0;
3546         int mask = 0;
3547         int cur, org = 0;
3548         uint64_t orig_offloads;
3549         uint64_t dev_offloads;
3550         uint64_t new_offloads;
3551
3552         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3553         dev = &rte_eth_devices[port_id];
3554
3555         /* save original values in case of failure */
3556         orig_offloads = dev->data->dev_conf.rxmode.offloads;
3557         dev_offloads = orig_offloads;
3558
3559         /* check which option changed by application */
3560         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
3561         org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
3562         if (cur != org) {
3563                 if (cur)
3564                         dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
3565                 else
3566                         dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
3567                 mask |= ETH_VLAN_STRIP_MASK;
3568         }
3569
3570         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
3571         org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
3572         if (cur != org) {
3573                 if (cur)
3574                         dev_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3575                 else
3576                         dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
3577                 mask |= ETH_VLAN_FILTER_MASK;
3578         }
3579
3580         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
3581         org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND);
3582         if (cur != org) {
3583                 if (cur)
3584                         dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
3585                 else
3586                         dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
3587                 mask |= ETH_VLAN_EXTEND_MASK;
3588         }
3589
3590         cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD);
3591         org = !!(dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP);
3592         if (cur != org) {
3593                 if (cur)
3594                         dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
3595                 else
3596                         dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
3597                 mask |= ETH_QINQ_STRIP_MASK;
3598         }
3599
3600         /* no change */
3601         if (mask == 0)
3602                 return ret;
3603
3604         ret = rte_eth_dev_info_get(port_id, &dev_info);
3605         if (ret != 0)
3606                 return ret;
3607
3608         /* Rx VLAN offloading must be within its device capabilities */
3609         if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) {
3610                 new_offloads = dev_offloads & ~orig_offloads;
3611                 RTE_ETHDEV_LOG(ERR,
3612                         "Ethdev port_id=%u newly requested VLAN offloads "
3613                         "0x%" PRIx64 " must be within Rx offload capabilities "
3614                         "0x%" PRIx64 " in %s()\n",
3615                         port_id, new_offloads, dev_info.rx_offload_capa,
3616                         __func__);
3617                 return -EINVAL;
3618         }
3619
3620         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
3621         dev->data->dev_conf.rxmode.offloads = dev_offloads;
3622         ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
3623         if (ret) {
3624                 /* hit an error, restore original values */
3625                 dev->data->dev_conf.rxmode.offloads = orig_offloads;
3626         }
3627
3628         return eth_err(port_id, ret);
3629 }
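
/*
 * Illustrative read-modify-write sketch, not library code: the mask returned
 * by rte_eth_dev_get_vlan_offload() can be edited and written back; only the
 * bits that actually changed reach the driver.
 *
 *	int mask = rte_eth_dev_get_vlan_offload(port_id);
 *
 *	if (mask >= 0)
 *		(void)rte_eth_dev_set_vlan_offload(port_id,
 *				mask | ETH_VLAN_STRIP_OFFLOAD);
 */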
3630
3631 int
3632 rte_eth_dev_get_vlan_offload(uint16_t port_id)
3633 {
3634         struct rte_eth_dev *dev;
3635         uint64_t *dev_offloads;
3636         int ret = 0;
3637
3638         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3639         dev = &rte_eth_devices[port_id];
3640         dev_offloads = &dev->data->dev_conf.rxmode.offloads;
3641
3642         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
3643                 ret |= ETH_VLAN_STRIP_OFFLOAD;
3644
3645         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
3646                 ret |= ETH_VLAN_FILTER_OFFLOAD;
3647
3648         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
3649                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
3650
3651         if (*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
3652                 ret |= ETH_QINQ_STRIP_OFFLOAD;
3653
3654         return ret;
3655 }
3656
3657 int
3658 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
3659 {
3660         struct rte_eth_dev *dev;
3661
3662         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3663         dev = &rte_eth_devices[port_id];
3664         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
3665
3666         return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
3667 }
3668
3669 int
3670 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3671 {
3672         struct rte_eth_dev *dev;
3673
3674         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3675         dev = &rte_eth_devices[port_id];
3676         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
3677         memset(fc_conf, 0, sizeof(*fc_conf));
3678         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
3679 }
3680
3681 int
3682 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3683 {
3684         struct rte_eth_dev *dev;
3685
3686         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3687         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
3688                 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n");
3689                 return -EINVAL;
3690         }
3691
3692         dev = &rte_eth_devices[port_id];
3693         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
3694         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
3695 }
3696
3697 int
3698 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
3699                                    struct rte_eth_pfc_conf *pfc_conf)
3700 {
3701         struct rte_eth_dev *dev;
3702
3703         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3704         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
3705                 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
3706                 return -EINVAL;
3707         }
3708
3709         dev = &rte_eth_devices[port_id];
3710         /* High water/low water validation is device-specific */
3711         if (*dev->dev_ops->priority_flow_ctrl_set)
3712                 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
3713                                         (dev, pfc_conf));
3714         return -ENOTSUP;
3715 }
3716
3717 static int
3718 eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
3719                         uint16_t reta_size)
3720 {
3721         uint16_t i, num;
3722
3723         if (!reta_conf)
3724                 return -EINVAL;
3725
3726         num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
3727         for (i = 0; i < num; i++) {
3728                 if (reta_conf[i].mask)
3729                         return 0;
3730         }
3731
3732         return -EINVAL;
3733 }
3734
3735 static int
3736 eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
3737                          uint16_t reta_size,
3738                          uint16_t max_rxq)
3739 {
3740         uint16_t i, idx, shift;
3741
3742         if (!reta_conf)
3743                 return -EINVAL;
3744
3745         if (max_rxq == 0) {
3746                 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n");
3747                 return -EINVAL;
3748         }
3749
3750         for (i = 0; i < reta_size; i++) {
3751                 idx = i / RTE_RETA_GROUP_SIZE;
3752                 shift = i % RTE_RETA_GROUP_SIZE;
3753                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
3754                         (reta_conf[idx].reta[shift] >= max_rxq)) {
3755                         RTE_ETHDEV_LOG(ERR,
3756                                 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n",
3757                                 idx, shift,
3758                                 reta_conf[idx].reta[shift], max_rxq);
3759                         return -EINVAL;
3760                 }
3761         }
3762
3763         return 0;
3764 }
3765
3766 int
3767 rte_eth_dev_rss_reta_update(uint16_t port_id,
3768                             struct rte_eth_rss_reta_entry64 *reta_conf,
3769                             uint16_t reta_size)
3770 {
3771         struct rte_eth_dev *dev;
3772         int ret;
3773
3774         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3775         /* Check mask bits */
3776         ret = eth_check_reta_mask(reta_conf, reta_size);
3777         if (ret < 0)
3778                 return ret;
3779
3780         dev = &rte_eth_devices[port_id];
3781
3782         /* Check entry value */
3783         ret = eth_check_reta_entry(reta_conf, reta_size,
3784                                 dev->data->nb_rx_queues);
3785         if (ret < 0)
3786                 return ret;
3787
3788         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
3789         return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
3790                                                              reta_size));
3791 }
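
/*
 * Illustrative sketch, not library code: spread a redirection table
 * round-robin over nb_q queues. Entry i lives in group
 * i / RTE_RETA_GROUP_SIZE at position i % RTE_RETA_GROUP_SIZE, and its mask
 * bit must be set for the entry to be applied. "reta_size" (from
 * dev_info.reta_size, assumed to be a multiple of RTE_RETA_GROUP_SIZE) and
 * "nb_q" are hypothetical application values.
 *
 *	struct rte_eth_rss_reta_entry64 conf[reta_size / RTE_RETA_GROUP_SIZE];
 *	uint16_t i;
 *
 *	memset(conf, 0, sizeof(conf));
 *	for (i = 0; i < reta_size; i++) {
 *		conf[i / RTE_RETA_GROUP_SIZE].mask |=
 *			UINT64_C(1) << (i % RTE_RETA_GROUP_SIZE);
 *		conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *			i % nb_q;
 *	}
 *	(void)rte_eth_dev_rss_reta_update(port_id, conf, reta_size);
 */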
3792
3793 int
3794 rte_eth_dev_rss_reta_query(uint16_t port_id,
3795                            struct rte_eth_rss_reta_entry64 *reta_conf,
3796                            uint16_t reta_size)
3797 {
3798         struct rte_eth_dev *dev;
3799         int ret;
3800
3801         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3802
3803         /* Check mask bits */
3804         ret = eth_check_reta_mask(reta_conf, reta_size);
3805         if (ret < 0)
3806                 return ret;
3807
3808         dev = &rte_eth_devices[port_id];
3809         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
3810         return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
3811                                                             reta_size));
3812 }
3813
3814 int
3815 rte_eth_dev_rss_hash_update(uint16_t port_id,
3816                             struct rte_eth_rss_conf *rss_conf)
3817 {
3818         struct rte_eth_dev *dev;
3819         struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
3820         int ret;
3821
3822         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3823
3824         ret = rte_eth_dev_info_get(port_id, &dev_info);
3825         if (ret != 0)
3826                 return ret;
3827
3828         rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf);
3829
3830         dev = &rte_eth_devices[port_id];
3831         if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
3832             dev_info.flow_type_rss_offloads) {
3833                 RTE_ETHDEV_LOG(ERR,
3834                         "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
3835                         port_id, rss_conf->rss_hf,
3836                         dev_info.flow_type_rss_offloads);
3837                 return -EINVAL;
3838         }
3839         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
3840         return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
3841                                                                  rss_conf));
3842 }
3843
3844 int
3845 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
3846                               struct rte_eth_rss_conf *rss_conf)
3847 {
3848         struct rte_eth_dev *dev;
3849
3850         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3851         dev = &rte_eth_devices[port_id];
3852         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
3853         return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
3854                                                                    rss_conf));
3855 }
3856
3857 int
3858 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
3859                                 struct rte_eth_udp_tunnel *udp_tunnel)
3860 {
3861         struct rte_eth_dev *dev;
3862
3863         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3864         if (udp_tunnel == NULL) {
3865                 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
3866                 return -EINVAL;
3867         }
3868
3869         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
3870                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
3871                 return -EINVAL;
3872         }
3873
3874         dev = &rte_eth_devices[port_id];
3875         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
3876         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
3877                                                                 udp_tunnel));
3878 }
3879
3880 int
3881 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
3882                                    struct rte_eth_udp_tunnel *udp_tunnel)
3883 {
3884         struct rte_eth_dev *dev;
3885
3886         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3887         dev = &rte_eth_devices[port_id];
3888
3889         if (udp_tunnel == NULL) {
3890                 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
3891                 return -EINVAL;
3892         }
3893
3894         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
3895                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
3896                 return -EINVAL;
3897         }
3898
3899         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
3900         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
3901                                                                 udp_tunnel));
3902 }
3903
3904 int
3905 rte_eth_led_on(uint16_t port_id)
3906 {
3907         struct rte_eth_dev *dev;
3908
3909         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3910         dev = &rte_eth_devices[port_id];
3911         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
3912         return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
3913 }
3914
3915 int
3916 rte_eth_led_off(uint16_t port_id)
3917 {
3918         struct rte_eth_dev *dev;
3919
3920         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3921         dev = &rte_eth_devices[port_id];
3922         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
3923         return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
3924 }
3925
3926 int
3927 rte_eth_fec_get_capability(uint16_t port_id,
3928                            struct rte_eth_fec_capa *speed_fec_capa,
3929                            unsigned int num)
3930 {
3931         struct rte_eth_dev *dev;
3932         int ret;
3933
3934         if (speed_fec_capa == NULL && num > 0)
3935                 return -EINVAL;
3936
3937         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3938         dev = &rte_eth_devices[port_id];
3939         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get_capability, -ENOTSUP);
3940         ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num);
3941
3942         return ret;
3943 }
3944
3945 int
3946 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa)
3947 {
3948         struct rte_eth_dev *dev;
3949
3950         if (fec_capa == NULL)
3951                 return -EINVAL;
3952
3953         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3954         dev = &rte_eth_devices[port_id];
3955         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get, -ENOTSUP);
3956         return eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa));
3957 }
3958
3959 int
3960 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa)
3961 {
3962         struct rte_eth_dev *dev;
3963
3964         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3965         dev = &rte_eth_devices[port_id];
3966         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_set, -ENOTSUP);
3967         return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa));
3968 }
3969
3970 /*
3971  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
3972  * an empty spot.
3973  */
3974 static int
3975 eth_dev_get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
3976 {
3977         struct rte_eth_dev_info dev_info;
3978         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3979         unsigned i;
3980         int ret;
3981
3982         ret = rte_eth_dev_info_get(port_id, &dev_info);
3983         if (ret != 0)
3984                 return -1;
3985
3986         for (i = 0; i < dev_info.max_mac_addrs; i++)
3987                 if (memcmp(addr, &dev->data->mac_addrs[i],
3988                                 RTE_ETHER_ADDR_LEN) == 0)
3989                         return i;
3990
3991         return -1;
3992 }
3993
3994 static const struct rte_ether_addr null_mac_addr;
3995
3996 int
3997 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
3998                         uint32_t pool)
3999 {
4000         struct rte_eth_dev *dev;
4001         int index;
4002         uint64_t pool_mask;
4003         int ret;
4004
4005         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4006         dev = &rte_eth_devices[port_id];
4007         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
4008
4009         if (rte_is_zero_ether_addr(addr)) {
4010                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
4011                         port_id);
4012                 return -EINVAL;
4013         }
4014         if (pool >= ETH_64_POOLS) {
4015                 RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", ETH_64_POOLS - 1);
4016                 return -EINVAL;
4017         }
4018
4019         index = eth_dev_get_mac_addr_index(port_id, addr);
4020         if (index < 0) {
4021                 index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr);
4022                 if (index < 0) {
4023                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
4024                                 port_id);
4025                         return -ENOSPC;
4026                 }
4027         } else {
4028                 pool_mask = dev->data->mac_pool_sel[index];
4029
4030                 /* If both the MAC address and pool are already there, do nothing */
4031                 if (pool_mask & (1ULL << pool))
4032                         return 0;
4033         }
4034
4035         /* Update NIC */
4036         ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
4037
4038         if (ret == 0) {
4039                 /* Update address in NIC data structure */
4040                 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]);
4041
4042                 /* Update pool bitmap in NIC data structure */
4043                 dev->data->mac_pool_sel[index] |= (1ULL << pool);
4044         }
4045
4046         return eth_err(port_id, ret);
4047 }
4048
4049 int
4050 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr)
4051 {
4052         struct rte_eth_dev *dev;
4053         int index;
4054
4055         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4056         dev = &rte_eth_devices[port_id];
4057         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
4058
4059         index = eth_dev_get_mac_addr_index(port_id, addr);
4060         if (index == 0) {
4061                 RTE_ETHDEV_LOG(ERR,
4062                         "Port %u: Cannot remove default MAC address\n",
4063                         port_id);
4064                 return -EADDRINUSE;
4065         } else if (index < 0)
4066                 return 0;  /* Do nothing if address wasn't found */
4067
4068         /* Update NIC */
4069         (*dev->dev_ops->mac_addr_remove)(dev, index);
4070
4071         /* Update address in NIC data structure */
4072         rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
4073
4074         /* reset pool bitmap */
4075         dev->data->mac_pool_sel[index] = 0;
4076
4077         return 0;
4078 }
4079
4080 int
4081 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr)
4082 {
4083         struct rte_eth_dev *dev;
4084         int ret;
4085
4086         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4087
4088         if (!rte_is_valid_assigned_ether_addr(addr))
4089                 return -EINVAL;
4090
4091         dev = &rte_eth_devices[port_id];
4092         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
4093
4094         ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
4095         if (ret < 0)
4096                 return ret;
4097
4098         /* Update default address in NIC data structure */
4099         rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
4100
4101         return 0;
4102 }
4103
4105 /*
4106  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
4107  * an empty spot.
4108  */
4109 static int
4110 eth_dev_get_hash_mac_addr_index(uint16_t port_id,
4111                 const struct rte_ether_addr *addr)
4112 {
4113         struct rte_eth_dev_info dev_info;
4114         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4115         unsigned i;
4116         int ret;
4117
4118         ret = rte_eth_dev_info_get(port_id, &dev_info);
4119         if (ret != 0)
4120                 return -1;
4121
4122         if (!dev->data->hash_mac_addrs)
4123                 return -1;
4124
4125         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
4126                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
4127                         RTE_ETHER_ADDR_LEN) == 0)
4128                         return i;
4129
4130         return -1;
4131 }
4132
4133 int
4134 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
4135                                 uint8_t on)
4136 {
4137         int index;
4138         int ret;
4139         struct rte_eth_dev *dev;
4140
4141         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4142
4143         dev = &rte_eth_devices[port_id];
4144         if (rte_is_zero_ether_addr(addr)) {
4145                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
4146                         port_id);
4147                 return -EINVAL;
4148         }
4149
4150         index = eth_dev_get_hash_mac_addr_index(port_id, addr);
4151         /* Check if it's already there, and do nothing */
4152         if ((index >= 0) && on)
4153                 return 0;
4154
4155         if (index < 0) {
4156                 if (!on) {
4157                         RTE_ETHDEV_LOG(ERR,
4158                                 "Port %u: the MAC address was not set in UTA\n",
4159                                 port_id);
4160                         return -EINVAL;
4161                 }
4162
4163                 index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr);
4164                 if (index < 0) {
4165                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
4166                                 port_id);
4167                         return -ENOSPC;
4168                 }
4169         }
4170
4171         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
4172         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
4173         if (ret == 0) {
4174                 /* Update address in NIC data structure */
4175                 if (on)
4176                         rte_ether_addr_copy(addr,
4177                                         &dev->data->hash_mac_addrs[index]);
4178                 else
4179                         rte_ether_addr_copy(&null_mac_addr,
4180                                         &dev->data->hash_mac_addrs[index]);
4181         }
4182
4183         return eth_err(port_id, ret);
4184 }
4185
4186 int
4187 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
4188 {
4189         struct rte_eth_dev *dev;
4190
4191         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4192
4193         dev = &rte_eth_devices[port_id];
4194
4195         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
4196         return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
4197                                                                        on));
4198 }
4199
4200 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
4201                                         uint16_t tx_rate)
4202 {
4203         struct rte_eth_dev *dev;
4204         struct rte_eth_dev_info dev_info;
4205         struct rte_eth_link link;
4206         int ret;
4207
4208         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4209
4210         ret = rte_eth_dev_info_get(port_id, &dev_info);
4211         if (ret != 0)
4212                 return ret;
4213
4214         dev = &rte_eth_devices[port_id];
4215         link = dev->data->dev_link;
4216
4217         if (queue_idx >= dev_info.max_tx_queues) {
4218                 RTE_ETHDEV_LOG(ERR,
4219                         "Set queue rate limit: port %u: invalid queue id=%u\n",
4220                         port_id, queue_idx);
4221                 return -EINVAL;
4222         }
4223
4224         if (tx_rate > link.link_speed) {
4225                 RTE_ETHDEV_LOG(ERR,
4226                         "Set queue rate limit: invalid tx_rate=%u, bigger than link speed=%u\n",
4227                         tx_rate, link.link_speed);
4228                 return -EINVAL;
4229         }
4230
4231         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
4232         return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
4233                                                         queue_idx, tx_rate));
4234 }
4235
4236 int
4237 rte_eth_mirror_rule_set(uint16_t port_id,
4238                         struct rte_eth_mirror_conf *mirror_conf,
4239                         uint8_t rule_id, uint8_t on)
4240 {
4241         struct rte_eth_dev *dev;
4242
4243         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4244         if (mirror_conf->rule_type == 0) {
4245                 RTE_ETHDEV_LOG(ERR, "Mirror rule type cannot be 0\n");
4246                 return -EINVAL;
4247         }
4248
4249         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
4250                 RTE_ETHDEV_LOG(ERR, "Invalid dst pool, pool id must be 0-%d\n",
4251                         ETH_64_POOLS - 1);
4252                 return -EINVAL;
4253         }
4254
4255         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
4256              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
4257             (mirror_conf->pool_mask == 0)) {
4258                 RTE_ETHDEV_LOG(ERR,
4259                         "Invalid mirror pool, pool mask cannot be 0\n");
4260                 return -EINVAL;
4261         }
4262
4263         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
4264             mirror_conf->vlan.vlan_mask == 0) {
4265                 RTE_ETHDEV_LOG(ERR,
4266                         "Invalid vlan mask, vlan mask cannot be 0\n");
4267                 return -EINVAL;
4268         }
4269
4270         dev = &rte_eth_devices[port_id];
4271         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
4272
4273         return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
4274                                                 mirror_conf, rule_id, on));
4275 }
4276
4277 int
4278 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
4279 {
4280         struct rte_eth_dev *dev;
4281
4282         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4283
4284         dev = &rte_eth_devices[port_id];
4285         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
4286
4287         return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev,
4288                                                                    rule_id));
4289 }
4290
4291 RTE_INIT(eth_dev_init_cb_lists)
4292 {
4293         int i;
4294
4295         for (i = 0; i < RTE_MAX_ETHPORTS; i++)
4296                 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
4297 }
4298
4299 int
4300 rte_eth_dev_callback_register(uint16_t port_id,
4301                         enum rte_eth_event_type event,
4302                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4303 {
4304         struct rte_eth_dev *dev;
4305         struct rte_eth_dev_callback *user_cb;
4306         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
4307         uint16_t last_port;
4308
4309         if (!cb_fn)
4310                 return -EINVAL;
4311
4312         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4313                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
4314                 return -EINVAL;
4315         }
4316
4317         if (port_id == RTE_ETH_ALL) {
4318                 next_port = 0;
4319                 last_port = RTE_MAX_ETHPORTS - 1;
4320         } else {
4321                 next_port = last_port = port_id;
4322         }
4323
4324         rte_spinlock_lock(&eth_dev_cb_lock);
4325
4326         do {
4327                 dev = &rte_eth_devices[next_port];
4328
4329                 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
4330                         if (user_cb->cb_fn == cb_fn &&
4331                                 user_cb->cb_arg == cb_arg &&
4332                                 user_cb->event == event) {
4333                                 break;
4334                         }
4335                 }
4336
4337                 /* create a new callback. */
4338                 if (user_cb == NULL) {
4339                         user_cb = rte_zmalloc("INTR_USER_CALLBACK",
4340                                 sizeof(struct rte_eth_dev_callback), 0);
4341                         if (user_cb != NULL) {
4342                                 user_cb->cb_fn = cb_fn;
4343                                 user_cb->cb_arg = cb_arg;
4344                                 user_cb->event = event;
4345                                 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
4346                                                   user_cb, next);
4347                         } else {
4348                                 rte_spinlock_unlock(&eth_dev_cb_lock);
4349                                 rte_eth_dev_callback_unregister(port_id, event,
4350                                                                 cb_fn, cb_arg);
4351                                 return -ENOMEM;
4352                         }
4354                 }
4355         } while (++next_port <= last_port);
4356
4357         rte_spinlock_unlock(&eth_dev_cb_lock);
4358         return 0;
4359 }
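
/*
 * Illustrative sketch, not library code: a minimal link-status handler
 * registered for every current and future port via RTE_ETH_ALL.
 *
 *	static int
 *	link_event_cb(uint16_t port_id, enum rte_eth_event_type event,
 *			void *cb_arg, void *ret_param)
 *	{
 *		RTE_SET_USED(event);
 *		RTE_SET_USED(cb_arg);
 *		RTE_SET_USED(ret_param);
 *		printf("link change on port %u\n", port_id);
 *		return 0;
 *	}
 *
 *	rte_eth_dev_callback_register(RTE_ETH_ALL,
 *			RTE_ETH_EVENT_INTR_LSC, link_event_cb, NULL);
 */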
4360
4361 int
4362 rte_eth_dev_callback_unregister(uint16_t port_id,
4363                         enum rte_eth_event_type event,
4364                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4365 {
4366         int ret;
4367         struct rte_eth_dev *dev;
4368         struct rte_eth_dev_callback *cb, *next;
4369         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
4370         uint16_t last_port;
4371
4372         if (!cb_fn)
4373                 return -EINVAL;
4374
4375         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4376                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
4377                 return -EINVAL;
4378         }
4379
4380         if (port_id == RTE_ETH_ALL) {
4381                 next_port = 0;
4382                 last_port = RTE_MAX_ETHPORTS - 1;
4383         } else {
4384                 next_port = last_port = port_id;
4385         }
4386
4387         rte_spinlock_lock(&eth_dev_cb_lock);
4388
4389         do {
4390                 dev = &rte_eth_devices[next_port];
4391                 ret = 0;
4392                 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
4393                      cb = next) {
4394
4395                         next = TAILQ_NEXT(cb, next);
4396
4397                         if (cb->cb_fn != cb_fn || cb->event != event ||
4398                             (cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
4399                                 continue;
4400
4401                         /*
4402                          * if this callback is not executing right now,
4403                          * then remove it.
4404                          */
4405                         if (cb->active == 0) {
4406                                 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
4407                                 rte_free(cb);
4408                         } else {
4409                                 ret = -EAGAIN;
4410                         }
4411                 }
4412         } while (++next_port <= last_port);
4413
4414         rte_spinlock_unlock(&eth_dev_cb_lock);
4415         return ret;
4416 }
4417
4418 int
4419 rte_eth_dev_callback_process(struct rte_eth_dev *dev,
4420         enum rte_eth_event_type event, void *ret_param)
4421 {
4422         struct rte_eth_dev_callback *cb_lst;
4423         struct rte_eth_dev_callback dev_cb;
4424         int rc = 0;
4425
4426         rte_spinlock_lock(&eth_dev_cb_lock);
4427         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
4428                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
4429                         continue;
4430                 dev_cb = *cb_lst;
4431                 cb_lst->active = 1;
4432                 if (ret_param != NULL)
4433                         dev_cb.ret_param = ret_param;
4434
4435                 rte_spinlock_unlock(&eth_dev_cb_lock);
4436                 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
4437                                 dev_cb.cb_arg, dev_cb.ret_param);
4438                 rte_spinlock_lock(&eth_dev_cb_lock);
4439                 cb_lst->active = 0;
4440         }
4441         rte_spinlock_unlock(&eth_dev_cb_lock);
4442         return rc;
4443 }
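
/*
 * Illustrative sketch (not part of the library): a PMD's interrupt handler
 * reporting a link-state change to the callbacks registered above.
 * "link_changed" is a hypothetical driver-side condition.
 *
 *     if (link_changed)
 *             rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
 *                                          NULL);
 */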
4444
4445 void
4446 rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
4447 {
4448         if (dev == NULL)
4449                 return;
4450
4451         rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);
4452
4453         dev->state = RTE_ETH_DEV_ATTACHED;
4454 }
4455
4456 int
4457 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
4458 {
4459         uint32_t vec;
4460         struct rte_eth_dev *dev;
4461         struct rte_intr_handle *intr_handle;
4462         uint16_t qid;
4463         int rc;
4464
4465         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4466
4467         dev = &rte_eth_devices[port_id];
4468
4469         if (!dev->intr_handle) {
4470                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4471                 return -ENOTSUP;
4472         }
4473
4474         intr_handle = dev->intr_handle;
4475         if (!intr_handle->intr_vec) {
4476                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4477                 return -EPERM;
4478         }
4479
4480         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
4481                 vec = intr_handle->intr_vec[qid];
4482                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4483                 if (rc && rc != -EEXIST) {
4484                         RTE_ETHDEV_LOG(ERR,
4485                                 "p %u q %u rx ctl error op %d epfd %d vec %u\n",
4486                                 port_id, qid, op, epfd, vec);
4487                 }
4488         }
4489
4490         return 0;
4491 }
4492
4493 int
4494 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
4495 {
4496         struct rte_intr_handle *intr_handle;
4497         struct rte_eth_dev *dev;
4498         unsigned int efd_idx;
4499         uint32_t vec;
4500         int fd;
4501
4502         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
4503
4504         dev = &rte_eth_devices[port_id];
4505
4506         if (queue_id >= dev->data->nb_rx_queues) {
4507                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4508                 return -1;
4509         }
4510
4511         if (!dev->intr_handle) {
4512                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4513                 return -1;
4514         }
4515
4516         intr_handle = dev->intr_handle;
4517         if (!intr_handle->intr_vec) {
4518                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4519                 return -1;
4520         }
4521
4522         vec = intr_handle->intr_vec[queue_id];
4523         efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
4524                 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
4525         fd = intr_handle->efds[efd_idx];
4526
4527         return fd;
4528 }
4529
4530 static inline int
4531 eth_dev_dma_mzone_name(char *name, size_t len, uint16_t port_id, uint16_t queue_id,
4532                 const char *ring_name)
4533 {
4534         return snprintf(name, len, "eth_p%d_q%d_%s",
4535                         port_id, queue_id, ring_name);
4536 }
4537
4538 const struct rte_memzone *
4539 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
4540                          uint16_t queue_id, size_t size, unsigned align,
4541                          int socket_id)
4542 {
4543         char z_name[RTE_MEMZONE_NAMESIZE];
4544         const struct rte_memzone *mz;
4545         int rc;
4546
4547         rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
4548                         queue_id, ring_name);
4549         if (rc >= RTE_MEMZONE_NAMESIZE) {
4550                 RTE_ETHDEV_LOG(ERR, "ring name too long\n");
4551                 rte_errno = ENAMETOOLONG;
4552                 return NULL;
4553         }
4554
4555         mz = rte_memzone_lookup(z_name);
4556         if (mz) {
4557                 if ((socket_id != SOCKET_ID_ANY && socket_id != mz->socket_id) ||
4558                                 size > mz->len ||
4559                                 ((uintptr_t)mz->addr & (align - 1)) != 0) {
4560                         RTE_ETHDEV_LOG(ERR,
4561                                 "memzone %s does not satisfy the requested attributes\n",
4562                                 mz->name);
4563                         return NULL;
4564                 }
4565
4566                 return mz;
4567         }
4568
4569         return rte_memzone_reserve_aligned(z_name, size, socket_id,
4570                         RTE_MEMZONE_IOVA_CONTIG, align);
4571 }
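
/*
 * Illustrative sketch (not part of the library): a PMD reserving an
 * IOVA-contiguous zone for a Rx descriptor ring. "rx_ring", "ring_size"
 * and "queue_idx" are assumed driver-side values.
 *
 *     const struct rte_memzone *mz;
 *
 *     mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, ring_size,
 *                                   RTE_CACHE_LINE_SIZE, socket_id);
 *     if (mz == NULL)
 *             return -ENOMEM;
 */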
4572
4573 int
4574 rte_eth_dma_zone_free(const struct rte_eth_dev *dev, const char *ring_name,
4575                 uint16_t queue_id)
4576 {
4577         char z_name[RTE_MEMZONE_NAMESIZE];
4578         const struct rte_memzone *mz;
4579         int rc = 0;
4580
4581         rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
4582                         queue_id, ring_name);
4583         if (rc >= RTE_MEMZONE_NAMESIZE) {
4584                 RTE_ETHDEV_LOG(ERR, "ring name too long\n");
4585                 return -ENAMETOOLONG;
4586         }
4587
4588         mz = rte_memzone_lookup(z_name);
4589         if (mz)
4590                 rc = rte_memzone_free(mz);
4591         else
4592                 rc = -ENOENT;
4593
4594         return rc;
4595 }
4596
4597 int
4598 rte_eth_dev_create(struct rte_device *device, const char *name,
4599         size_t priv_data_size,
4600         ethdev_bus_specific_init ethdev_bus_specific_init,
4601         void *bus_init_params,
4602         ethdev_init_t ethdev_init, void *init_params)
4603 {
4604         struct rte_eth_dev *ethdev;
4605         int retval;
4606
4607         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL);
4608
4609         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
4610                 ethdev = rte_eth_dev_allocate(name);
4611                 if (!ethdev)
4612                         return -ENODEV;
4613
4614                 if (priv_data_size) {
4615                         ethdev->data->dev_private = rte_zmalloc_socket(
4616                                 name, priv_data_size, RTE_CACHE_LINE_SIZE,
4617                                 device->numa_node);
4618
4619                         if (!ethdev->data->dev_private) {
4620                                 RTE_ETHDEV_LOG(ERR,
4621                                         "failed to allocate private data\n");
4622                                 retval = -ENOMEM;
4623                                 goto probe_failed;
4624                         }
4625                 }
4626         } else {
4627                 ethdev = rte_eth_dev_attach_secondary(name);
4628                 if (!ethdev) {
4629                         RTE_ETHDEV_LOG(ERR,
4630                                 "secondary process attach failed, ethdev doesn't exist\n");
4631                         return -ENODEV;
4632                 }
4633         }
4634
4635         ethdev->device = device;
4636
4637         if (ethdev_bus_specific_init) {
4638                 retval = ethdev_bus_specific_init(ethdev, bus_init_params);
4639                 if (retval) {
4640                         RTE_ETHDEV_LOG(ERR,
4641                                 "ethdev bus specific initialisation failed\n");
4642                         goto probe_failed;
4643                 }
4644         }
4645
4646         retval = ethdev_init(ethdev, init_params);
4647         if (retval) {
4648                 RTE_ETHDEV_LOG(ERR, "ethdev initialisation failed\n");
4649                 goto probe_failed;
4650         }
4651
4652         rte_eth_dev_probing_finish(ethdev);
4653
4654         return retval;
4655
4656 probe_failed:
4657         rte_eth_dev_release_port(ethdev);
4658         return retval;
4659 }
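
/*
 * Illustrative sketch (not part of the library): a PCI probe built on
 * rte_eth_dev_create(). "my_priv", "my_bus_init" and "my_ethdev_init" are
 * hypothetical driver-side names.
 *
 *     static int
 *     my_pci_probe(struct rte_pci_driver *drv, struct rte_pci_device *pdev)
 *     {
 *             RTE_SET_USED(drv);
 *             return rte_eth_dev_create(&pdev->device, pdev->device.name,
 *                                       sizeof(struct my_priv),
 *                                       my_bus_init, pdev,
 *                                       my_ethdev_init, NULL);
 *     }
 */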
4660
4661 int
4662 rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
4663         ethdev_uninit_t ethdev_uninit)
4664 {
4665         int ret;
4666
4667         ethdev = rte_eth_dev_allocated(ethdev->data->name);
4668         if (!ethdev)
4669                 return -ENODEV;
4670
4671         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL);
4672
4673         ret = ethdev_uninit(ethdev);
4674         if (ret)
4675                 return ret;
4676
4677         return rte_eth_dev_release_port(ethdev);
4678 }
4679
4680 int
4681 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
4682                           int epfd, int op, void *data)
4683 {
4684         uint32_t vec;
4685         struct rte_eth_dev *dev;
4686         struct rte_intr_handle *intr_handle;
4687         int rc;
4688
4689         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4690
4691         dev = &rte_eth_devices[port_id];
4692         if (queue_id >= dev->data->nb_rx_queues) {
4693                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4694                 return -EINVAL;
4695         }
4696
4697         if (!dev->intr_handle) {
4698                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4699                 return -ENOTSUP;
4700         }
4701
4702         intr_handle = dev->intr_handle;
4703         if (!intr_handle->intr_vec) {
4704                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4705                 return -EPERM;
4706         }
4707
4708         vec = intr_handle->intr_vec[queue_id];
4709         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4710         if (rc && rc != -EEXIST) {
4711                 RTE_ETHDEV_LOG(ERR,
4712                         "p %u q %u rx ctl error op %d epfd %d vec %u\n",
4713                         port_id, queue_id, op, epfd, vec);
4714                 return rc;
4715         }
4716
4717         return 0;
4718 }
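
/*
 * Illustrative sketch (not part of the library): arming a queue interrupt
 * on the per-thread epoll instance and sleeping until traffic arrives.
 * Port and queue numbers are assumed application values.
 *
 *     struct rte_epoll_event ev;
 *
 *     rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
 *                               RTE_INTR_EVENT_ADD, NULL);
 *     rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *     if (rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1) > 0)
 *             rte_eth_dev_rx_intr_disable(port_id, queue_id);
 */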
4719
4720 int
4721 rte_eth_dev_rx_intr_enable(uint16_t port_id,
4722                            uint16_t queue_id)
4723 {
4724         struct rte_eth_dev *dev;
4725         int ret;
4726
4727         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4728
4729         dev = &rte_eth_devices[port_id];
4730
4731         ret = eth_dev_validate_rx_queue(dev, queue_id);
4732         if (ret != 0)
4733                 return ret;
4734
4735         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
4736         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev,
4737                                                                 queue_id));
4738 }
4739
4740 int
4741 rte_eth_dev_rx_intr_disable(uint16_t port_id,
4742                             uint16_t queue_id)
4743 {
4744         struct rte_eth_dev *dev;
4745         int ret;
4746
4747         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4748
4749         dev = &rte_eth_devices[port_id];
4750
4751         ret = eth_dev_validate_rx_queue(dev, queue_id);
4752         if (ret != 0)
4753                 return ret;
4754
4755         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
4756         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev,
4757                                                                 queue_id));
4758 }
4759
4760
4761 int
4762 rte_eth_dev_filter_supported(uint16_t port_id,
4763                              enum rte_filter_type filter_type)
4764 {
4765         struct rte_eth_dev *dev;
4766
4767         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4768
4769         dev = &rte_eth_devices[port_id];
4770         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
4771         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
4772                                 RTE_ETH_FILTER_NOP, NULL);
4773 }
4774
4775 int
4776 rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
4777                         enum rte_filter_op filter_op, void *arg)
4778 {
4779         struct rte_eth_dev *dev;
4780
4781         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4782
4783         dev = &rte_eth_devices[port_id];
4784         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
4785         return eth_err(port_id, (*dev->dev_ops->filter_ctrl)(dev, filter_type,
4786                                                              filter_op, arg));
4787 }
4788
4789 const struct rte_eth_rxtx_callback *
4790 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
4791                 rte_rx_callback_fn fn, void *user_param)
4792 {
4793 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4794         rte_errno = ENOTSUP;
4795         return NULL;
4796 #endif
4797         struct rte_eth_dev *dev;
4798
4799         /* check input parameters */
4800         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4801                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4802                 rte_errno = EINVAL;
4803                 return NULL;
4804         }
4805         dev = &rte_eth_devices[port_id];
4806         if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
4807                 rte_errno = EINVAL;
4808                 return NULL;
4809         }
4810         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4811
4812         if (cb == NULL) {
4813                 rte_errno = ENOMEM;
4814                 return NULL;
4815         }
4816
4817         cb->fn.rx = fn;
4818         cb->param = user_param;
4819
4820         rte_spinlock_lock(&eth_dev_rx_cb_lock);
4821         /* Add the callback at the tail to keep FIFO order. */
4822         struct rte_eth_rxtx_callback *tail =
4823                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
4824
4825         if (!tail) {
4826                 /* Stores to cb->fn and cb->param should complete before
4827                  * cb is visible to data plane.
4828                  */
4829                 __atomic_store_n(
4830                         &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
4831                         cb, __ATOMIC_RELEASE);
4832
4833         } else {
4834                 while (tail->next)
4835                         tail = tail->next;
4836                 /* Stores to cb->fn and cb->param should complete before
4837                  * cb is visible to data plane.
4838                  */
4839                 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
4840         }
4841         rte_spinlock_unlock(&eth_dev_rx_cb_lock);
4842
4843         return cb;
4844 }
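
/*
 * Illustrative sketch (not part of the library): a post-Rx callback that
 * counts received packets into an application counter. "rx_count_cb" and
 * "total_pkts" are hypothetical; the signature is rte_rx_callback_fn.
 *
 *     static uint16_t
 *     rx_count_cb(uint16_t port_id, uint16_t queue_id,
 *                 struct rte_mbuf *pkts[], uint16_t nb_pkts,
 *                 uint16_t max_pkts, void *user_param)
 *     {
 *             uint64_t *total = user_param;
 *
 *             RTE_SET_USED(port_id);
 *             RTE_SET_USED(queue_id);
 *             RTE_SET_USED(pkts);
 *             RTE_SET_USED(max_pkts);
 *             *total += nb_pkts;
 *             return nb_pkts;
 *     }
 *
 *     rte_eth_add_rx_callback(port_id, queue_id, rx_count_cb, &total_pkts);
 */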
4845
4846 const struct rte_eth_rxtx_callback *
4847 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
4848                 rte_rx_callback_fn fn, void *user_param)
4849 {
4850 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4851         rte_errno = ENOTSUP;
4852         return NULL;
4853 #endif
4854         /* check input parameters */
4855         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4856                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4857                 rte_errno = EINVAL;
4858                 return NULL;
4859         }
4860
4861         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4862
4863         if (cb == NULL) {
4864                 rte_errno = ENOMEM;
4865                 return NULL;
4866         }
4867
4868         cb->fn.rx = fn;
4869         cb->param = user_param;
4870
4871         rte_spinlock_lock(&eth_dev_rx_cb_lock);
4872         /* Add the callback at the first position. */
4873         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
4874         /* Stores to cb->fn, cb->param and cb->next should complete before
4875          * cb is visible to data plane threads.
4876          */
4877         __atomic_store_n(
4878                 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
4879                 cb, __ATOMIC_RELEASE);
4880         rte_spinlock_unlock(&eth_dev_rx_cb_lock);
4881
4882         return cb;
4883 }
4884
4885 const struct rte_eth_rxtx_callback *
4886 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
4887                 rte_tx_callback_fn fn, void *user_param)
4888 {
4889 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4890         rte_errno = ENOTSUP;
4891         return NULL;
4892 #endif
4893         struct rte_eth_dev *dev;
4894
4895         /* check input parameters */
4896         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4897                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
4898                 rte_errno = EINVAL;
4899                 return NULL;
4900         }
4901
4902         dev = &rte_eth_devices[port_id];
4903         if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
4904                 rte_errno = EINVAL;
4905                 return NULL;
4906         }
4907
4908         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4909
4910         if (cb == NULL) {
4911                 rte_errno = ENOMEM;
4912                 return NULL;
4913         }
4914
4915         cb->fn.tx = fn;
4916         cb->param = user_param;
4917
4918         rte_spinlock_lock(&eth_dev_tx_cb_lock);
4919         /* Add the callback at the tail to keep FIFO order. */
4920         struct rte_eth_rxtx_callback *tail =
4921                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
4922
4923         if (!tail) {
4924                 /* Stores to cb->fn and cb->param should complete before
4925                  * cb is visible to data plane.
4926                  */
4927                 __atomic_store_n(
4928                         &rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id],
4929                         cb, __ATOMIC_RELEASE);
4930
4931         } else {
4932                 while (tail->next)
4933                         tail = tail->next;
4934                 /* Stores to cb->fn and cb->param should complete before
4935                  * cb is visible to data plane.
4936                  */
4937                 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
4938         }
4939         rte_spinlock_unlock(&eth_dev_tx_cb_lock);
4940
4941         return cb;
4942 }
4943
4944 int
4945 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
4946                 const struct rte_eth_rxtx_callback *user_cb)
4947 {
4948 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4949         return -ENOTSUP;
4950 #endif
4951         /* Check input parameters. */
4952         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4953         if (user_cb == NULL ||
4954                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
4955                 return -EINVAL;
4956
4957         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4958         struct rte_eth_rxtx_callback *cb;
4959         struct rte_eth_rxtx_callback **prev_cb;
4960         int ret = -EINVAL;
4961
4962         rte_spinlock_lock(&eth_dev_rx_cb_lock);
4963         prev_cb = &dev->post_rx_burst_cbs[queue_id];
4964         for (; *prev_cb != NULL; prev_cb = &cb->next) {
4965                 cb = *prev_cb;
4966                 if (cb == user_cb) {
4967                         /* Remove the user cb from the callback list. */
4968                         __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
4969                         ret = 0;
4970                         break;
4971                 }
4972         }
4973         rte_spinlock_unlock(&eth_dev_rx_cb_lock);
4974
4975         return ret;
4976 }
4977
4978 int
4979 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
4980                 const struct rte_eth_rxtx_callback *user_cb)
4981 {
4982 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4983         return -ENOTSUP;
4984 #endif
4985         /* Check input parameters. */
4986         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4987         if (user_cb == NULL ||
4988                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
4989                 return -EINVAL;
4990
4991         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4992         int ret = -EINVAL;
4993         struct rte_eth_rxtx_callback *cb;
4994         struct rte_eth_rxtx_callback **prev_cb;
4995
4996         rte_spinlock_lock(&eth_dev_tx_cb_lock);
4997         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
4998         for (; *prev_cb != NULL; prev_cb = &cb->next) {
4999                 cb = *prev_cb;
5000                 if (cb == user_cb) {
5001                         /* Remove the user cb from the callback list. */
5002                         __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
5003                         ret = 0;
5004                         break;
5005                 }
5006         }
5007         rte_spinlock_unlock(&eth_dev_tx_cb_lock);
5008
5009         return ret;
5010 }
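
/*
 * Note: neither rte_eth_remove_rx_callback() nor
 * rte_eth_remove_tx_callback() frees the callback object, because a
 * data-plane thread may still be running it. The application must ensure
 * no lcore is inside the callback before releasing the handle returned at
 * registration time with rte_free().
 */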
5011
5012 int
5013 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5014         struct rte_eth_rxq_info *qinfo)
5015 {
5016         struct rte_eth_dev *dev;
5017
5018         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5019
5020         if (qinfo == NULL)
5021                 return -EINVAL;
5022
5023         dev = &rte_eth_devices[port_id];
5024         if (queue_id >= dev->data->nb_rx_queues) {
5025                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
5026                 return -EINVAL;
5027         }
5028
5029         if (dev->data->rx_queues == NULL ||
5030                         dev->data->rx_queues[queue_id] == NULL) {
5031                 RTE_ETHDEV_LOG(ERR,
5032                                "Rx queue %"PRIu16" of device with port_id=%"
5033                                PRIu16" has not been setup\n",
5034                                queue_id, port_id);
5035                 return -EINVAL;
5036         }
5037
5038         if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
5039                 RTE_ETHDEV_LOG(INFO,
5040                         "Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
5041                         queue_id, port_id);
5042                 return -EINVAL;
5043         }
5044
5045         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
5046
5047         memset(qinfo, 0, sizeof(*qinfo));
5048         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
5049         return 0;
5050 }
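
/*
 * Illustrative sketch (not part of the library): reading back the ring
 * size a queue actually uses.
 *
 *     struct rte_eth_rxq_info qinfo;
 *
 *     if (rte_eth_rx_queue_info_get(port_id, 0, &qinfo) == 0)
 *             printf("rxq 0: %u descriptors\n", qinfo.nb_desc);
 */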
5051
5052 int
5053 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5054         struct rte_eth_txq_info *qinfo)
5055 {
5056         struct rte_eth_dev *dev;
5057
5058         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5059
5060         if (qinfo == NULL)
5061                 return -EINVAL;
5062
5063         dev = &rte_eth_devices[port_id];
5064         if (queue_id >= dev->data->nb_tx_queues) {
5065                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
5066                 return -EINVAL;
5067         }
5068
5069         if (dev->data->tx_queues == NULL ||
5070                         dev->data->tx_queues[queue_id] == NULL) {
5071                 RTE_ETHDEV_LOG(ERR,
5072                                "Tx queue %"PRIu16" of device with port_id=%"
5073                                PRIu16" has not been setup\n",
5074                                queue_id, port_id);
5075                 return -EINVAL;
5076         }
5077
5078         if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
5079                 RTE_ETHDEV_LOG(INFO,
5080                         "Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
5081                         queue_id, port_id);
5082                 return -EINVAL;
5083         }
5084
5085         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
5086
5087         memset(qinfo, 0, sizeof(*qinfo));
5088         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
5089
5090         return 0;
5091 }
5092
5093 int
5094 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5095                           struct rte_eth_burst_mode *mode)
5096 {
5097         struct rte_eth_dev *dev;
5098
5099         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5100
5101         if (mode == NULL)
5102                 return -EINVAL;
5103
5104         dev = &rte_eth_devices[port_id];
5105
5106         if (queue_id >= dev->data->nb_rx_queues) {
5107                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
5108                 return -EINVAL;
5109         }
5110
5111         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP);
5112         memset(mode, 0, sizeof(*mode));
5113         return eth_err(port_id,
5114                        dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode));
5115 }
5116
5117 int
5118 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5119                           struct rte_eth_burst_mode *mode)
5120 {
5121         struct rte_eth_dev *dev;
5122
5123         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5124
5125         if (mode == NULL)
5126                 return -EINVAL;
5127
5128         dev = &rte_eth_devices[port_id];
5129
5130         if (queue_id >= dev->data->nb_tx_queues) {
5131                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
5132                 return -EINVAL;
5133         }
5134
5135         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP);
5136         memset(mode, 0, sizeof(*mode));
5137         return eth_err(port_id,
5138                        dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode));
5139 }
5140
5141 int
5142 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
5143                              struct rte_ether_addr *mc_addr_set,
5144                              uint32_t nb_mc_addr)
5145 {
5146         struct rte_eth_dev *dev;
5147
5148         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5149
5150         dev = &rte_eth_devices[port_id];
5151         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
5152         return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
5153                                                 mc_addr_set, nb_mc_addr));
5154 }
5155
5156 int
5157 rte_eth_timesync_enable(uint16_t port_id)
5158 {
5159         struct rte_eth_dev *dev;
5160
5161         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5162         dev = &rte_eth_devices[port_id];
5163
5164         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
5165         return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
5166 }
5167
5168 int
5169 rte_eth_timesync_disable(uint16_t port_id)
5170 {
5171         struct rte_eth_dev *dev;
5172
5173         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5174         dev = &rte_eth_devices[port_id];
5175
5176         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
5177         return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
5178 }
5179
5180 int
5181 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
5182                                    uint32_t flags)
5183 {
5184         struct rte_eth_dev *dev;
5185
5186         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5187         dev = &rte_eth_devices[port_id];
5188
5189         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
5190         return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
5191                                 (dev, timestamp, flags));
5192 }
5193
5194 int
5195 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
5196                                    struct timespec *timestamp)
5197 {
5198         struct rte_eth_dev *dev;
5199
5200         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5201         dev = &rte_eth_devices[port_id];
5202
5203         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
5204         return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
5205                                 (dev, timestamp));
5206 }
5207
5208 int
5209 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
5210 {
5211         struct rte_eth_dev *dev;
5212
5213         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5214         dev = &rte_eth_devices[port_id];
5215
5216         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
5217         return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev,
5218                                                                       delta));
5219 }
5220
5221 int
5222 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
5223 {
5224         struct rte_eth_dev *dev;
5225
5226         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5227         dev = &rte_eth_devices[port_id];
5228
5229         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
5230         return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
5231                                                                 timestamp));
5232 }
5233
5234 int
5235 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
5236 {
5237         struct rte_eth_dev *dev;
5238
5239         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5240         dev = &rte_eth_devices[port_id];
5241
5242         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
5243         return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
5244                                                                 timestamp));
5245 }
5246
5247 int
5248 rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
5249 {
5250         struct rte_eth_dev *dev;
5251
5252         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5253         dev = &rte_eth_devices[port_id];
5254
5255         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP);
5256         return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock));
5257 }
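
/*
 * Illustrative sketch (not part of the library): a minimal IEEE 1588-style
 * sequence with error handling elided. "delta_ns" is a hypothetical
 * correction computed by the application.
 *
 *     struct timespec ts;
 *
 *     rte_eth_timesync_enable(port_id);
 *     rte_eth_timesync_read_time(port_id, &ts);
 *     rte_eth_timesync_adjust_time(port_id, delta_ns);
 *     rte_eth_timesync_disable(port_id);
 */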
5258
5259 int
5260 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
5261 {
5262         struct rte_eth_dev *dev;
5263
5264         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5265
5266         dev = &rte_eth_devices[port_id];
5267         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
5268         return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
5269 }
5270
5271 int
5272 rte_eth_dev_get_eeprom_length(uint16_t port_id)
5273 {
5274         struct rte_eth_dev *dev;
5275
5276         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5277
5278         dev = &rte_eth_devices[port_id];
5279         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
5280         return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
5281 }
5282
5283 int
5284 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
5285 {
5286         struct rte_eth_dev *dev;
5287
5288         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5289
5290         dev = &rte_eth_devices[port_id];
5291         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
5292         return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
5293 }
5294
5295 int
5296 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
5297 {
5298         struct rte_eth_dev *dev;
5299
5300         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5301
5302         dev = &rte_eth_devices[port_id];
5303         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
5304         return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
5305 }
5306
5307 int
5308 rte_eth_dev_get_module_info(uint16_t port_id,
5309                             struct rte_eth_dev_module_info *modinfo)
5310 {
5311         struct rte_eth_dev *dev;
5312
5313         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5314
5315         dev = &rte_eth_devices[port_id];
5316         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP);
5317         return (*dev->dev_ops->get_module_info)(dev, modinfo);
5318 }
5319
5320 int
5321 rte_eth_dev_get_module_eeprom(uint16_t port_id,
5322                               struct rte_dev_eeprom_info *info)
5323 {
5324         struct rte_eth_dev *dev;
5325
5326         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5327
5328         dev = &rte_eth_devices[port_id];
5329         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP);
5330         return (*dev->dev_ops->get_module_eeprom)(dev, info);
5331 }
5332
5333 int
5334 rte_eth_dev_get_dcb_info(uint16_t port_id,
5335                              struct rte_eth_dcb_info *dcb_info)
5336 {
5337         struct rte_eth_dev *dev;
5338
5339         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5340
5341         dev = &rte_eth_devices[port_id];
5342         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
5343
5344         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
5345         return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
5346 }
5347
5348 int
5349 rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
5350                                     struct rte_eth_l2_tunnel_conf *l2_tunnel)
5351 {
5352         struct rte_eth_dev *dev;
5353
5354         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5355         if (l2_tunnel == NULL) {
5356                 RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n");
5357                 return -EINVAL;
5358         }
5359
5360         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
5361                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
5362                 return -EINVAL;
5363         }
5364
5365         dev = &rte_eth_devices[port_id];
5366         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
5367                                 -ENOTSUP);
5368         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev,
5369                                                                 l2_tunnel));
5370 }
5371
5372 int
5373 rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
5374                                   struct rte_eth_l2_tunnel_conf *l2_tunnel,
5375                                   uint32_t mask,
5376                                   uint8_t en)
5377 {
5378         struct rte_eth_dev *dev;
5379
5380         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5381
5382         if (l2_tunnel == NULL) {
5383                 RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n");
5384                 return -EINVAL;
5385         }
5386
5387         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
5388                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
5389                 return -EINVAL;
5390         }
5391
5392         if (mask == 0) {
5393                 RTE_ETHDEV_LOG(ERR, "Mask must be non-zero\n");
5394                 return -EINVAL;
5395         }
5396
5397         dev = &rte_eth_devices[port_id];
5398         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
5399                                 -ENOTSUP);
5400         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_offload_set)(dev,
5401                                                         l2_tunnel, mask, en));
5402 }
5403
5404 static void
5405 eth_dev_adjust_nb_desc(uint16_t *nb_desc,
5406                 const struct rte_eth_desc_lim *desc_lim)
5407 {
5408         if (desc_lim->nb_align != 0)
5409                 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
5410
5411         if (desc_lim->nb_max != 0)
5412                 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
5413
5414         *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
5415 }
5416
5417 int
5418 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
5419                                  uint16_t *nb_rx_desc,
5420                                  uint16_t *nb_tx_desc)
5421 {
5422         struct rte_eth_dev_info dev_info;
5423         int ret;
5424
5425         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5426
5427         ret = rte_eth_dev_info_get(port_id, &dev_info);
5428         if (ret != 0)
5429                 return ret;
5430
5431         if (nb_rx_desc != NULL)
5432                 eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
5433
5434         if (nb_tx_desc != NULL)
5435                 eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
5436
5437         return 0;
5438 }
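
/*
 * Illustrative sketch (not part of the library): clamping application
 * defaults to the device limits before queue setup. "mb_pool" and
 * "socket_id" are assumed to exist in the caller.
 *
 *     uint16_t nb_rxd = 1024, nb_txd = 1024;
 *
 *     if (rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd) == 0) {
 *             rte_eth_rx_queue_setup(port_id, 0, nb_rxd, socket_id,
 *                                    NULL, mb_pool);
 *             rte_eth_tx_queue_setup(port_id, 0, nb_txd, socket_id, NULL);
 *     }
 */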
5439
5440 int
5441 rte_eth_dev_hairpin_capability_get(uint16_t port_id,
5442                                    struct rte_eth_hairpin_cap *cap)
5443 {
5444         struct rte_eth_dev *dev;
5445
5446         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5447
5448         dev = &rte_eth_devices[port_id];
5449         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP);
5450         memset(cap, 0, sizeof(*cap));
5451         return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap));
5452 }
5453
5454 int
5455 rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
5456 {
5457         if (dev->data->rx_queue_state[queue_id] ==
5458             RTE_ETH_QUEUE_STATE_HAIRPIN)
5459                 return 1;
5460         return 0;
5461 }
5462
5463 int
5464 rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
5465 {
5466         if (dev->data->tx_queue_state[queue_id] ==
5467             RTE_ETH_QUEUE_STATE_HAIRPIN)
5468                 return 1;
5469         return 0;
5470 }
5471
5472 int
5473 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
5474 {
5475         struct rte_eth_dev *dev;
5476
5477         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5478
5479         if (pool == NULL)
5480                 return -EINVAL;
5481
5482         dev = &rte_eth_devices[port_id];
5483
5484         if (*dev->dev_ops->pool_ops_supported == NULL)
5485                 return 1; /* all pools are supported */
5486
5487         return (*dev->dev_ops->pool_ops_supported)(dev, pool);
5488 }
5489
5490 /**
5491  * A set of values to describe the possible states of a switch domain.
5492  */
5493 enum rte_eth_switch_domain_state {
5494         RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
5495         RTE_ETH_SWITCH_DOMAIN_ALLOCATED
5496 };
5497
5498 /**
5499  * Array of switch domains available for allocation. Array is sized to
5500  * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
5501  * ethdev ports in a single process.
5502  */
5503 static struct rte_eth_dev_switch {
5504         enum rte_eth_switch_domain_state state;
5505 } eth_dev_switch_domains[RTE_MAX_ETHPORTS];
5506
5507 int
5508 rte_eth_switch_domain_alloc(uint16_t *domain_id)
5509 {
5510         unsigned int i;
5511
5512         *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
5513
5514         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
5515                 if (eth_dev_switch_domains[i].state ==
5516                         RTE_ETH_SWITCH_DOMAIN_UNUSED) {
5517                         eth_dev_switch_domains[i].state =
5518                                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
5519                         *domain_id = i;
5520                         return 0;
5521                 }
5522         }
5523
5524         return -ENOSPC;
5525 }
5526
5527 int
5528 rte_eth_switch_domain_free(uint16_t domain_id)
5529 {
5530         if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
5531                 domain_id >= RTE_MAX_ETHPORTS)
5532                 return -EINVAL;
5533
5534         if (eth_dev_switch_domains[domain_id].state !=
5535                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
5536                 return -EINVAL;
5537
5538         eth_dev_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;
5539
5540         return 0;
5541 }
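
/*
 * Illustrative sketch (not part of the library): a PF driver taking a
 * switch domain for itself and its representors and releasing it on
 * close.
 *
 *     uint16_t domain_id;
 *
 *     if (rte_eth_switch_domain_alloc(&domain_id) != 0)
 *             return -ENOSPC;
 *     ... create representor ports in this domain ...
 *     rte_eth_switch_domain_free(domain_id);
 */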
5542
5543 static int
5544 eth_dev_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
5545 {
5546         int state;
5547         struct rte_kvargs_pair *pair;
5548         char *letter;
5549
5550         arglist->str = strdup(str_in);
5551         if (arglist->str == NULL)
5552                 return -ENOMEM;
5553
5554         letter = arglist->str;
5555         state = 0;
5556         arglist->count = 0;
5557         pair = &arglist->pairs[0];
5558         while (1) {
5559                 switch (state) {
5560                 case 0: /* Initial */
5561                         if (*letter == '=')
5562                                 return -EINVAL;
5563                         else if (*letter == '\0')
5564                                 return 0;
5565
5566                         state = 1;
5567                         pair->key = letter;
5568                         /* fall-thru */
5569
5570                 case 1: /* Parsing key */
5571                         if (*letter == '=') {
5572                                 *letter = '\0';
5573                                 pair->value = letter + 1;
5574                                 state = 2;
5575                         } else if (*letter == ',' || *letter == '\0')
5576                                 return -EINVAL;
5577                         break;
5578
5579
5580                 case 2: /* Parsing value */
5581                         if (*letter == '[')
5582                                 state = 3;
5583                         else if (*letter == ',') {
5584                                 *letter = '\0';
5585                                 arglist->count++;
5586                                 pair = &arglist->pairs[arglist->count];
5587                                 state = 0;
5588                         } else if (*letter == '\0') {
5589                                 letter--;
5590                                 arglist->count++;
5591                                 pair = &arglist->pairs[arglist->count];
5592                                 state = 0;
5593                         }
5594                         break;
5595
5596                 case 3: /* Parsing list */
5597                         if (*letter == ']')
5598                                 state = 2;
5599                         else if (*letter == '\0')
5600                                 return -EINVAL;
5601                         break;
5602                 }
5603                 letter++;
5604         }
5605 }
5606
5607 int
5608 rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
5609 {
5610         struct rte_kvargs args;
5611         struct rte_kvargs_pair *pair;
5612         unsigned int i;
5613         int result = 0;
5614
5615         memset(eth_da, 0, sizeof(*eth_da));
5616
5617         result = eth_dev_devargs_tokenise(&args, dargs);
5618         if (result < 0)
5619                 goto parse_cleanup;
5620
5621         for (i = 0; i < args.count; i++) {
5622                 pair = &args.pairs[i];
5623                 if (strcmp("representor", pair->key) == 0) {
5624                         result = rte_eth_devargs_parse_list(pair->value,
5625                                 rte_eth_devargs_parse_representor_ports,
5626                                 eth_da);
5627                         if (result < 0)
5628                                 goto parse_cleanup;
5629                 }
5630         }
5631
5632 parse_cleanup:
5633         if (args.str)
5634                 free(args.str);
5635
5636         return result;
5637 }
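
/*
 * Illustrative sketch (not part of the library): extracting the
 * representor list from a device argument string such as
 * "representor=[0-3]".
 *
 *     struct rte_eth_devargs da;
 *
 *     if (rte_eth_devargs_parse("representor=[0-3]", &da) == 0)
 *             printf("%u representor ports requested\n",
 *                    da.nb_representor_ports);
 */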
5638
5639 static int
5640 eth_dev_handle_port_list(const char *cmd __rte_unused,
5641                 const char *params __rte_unused,
5642                 struct rte_tel_data *d)
5643 {
5644         int port_id;
5645
5646         rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
5647         RTE_ETH_FOREACH_DEV(port_id)
5648                 rte_tel_data_add_array_int(d, port_id);
5649         return 0;
5650 }
5651
5652 static void
5653 eth_dev_add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats,
5654                 const char *stat_name)
5655 {
5656         int q;
5657         struct rte_tel_data *q_data = rte_tel_data_alloc();
             if (q_data == NULL)
                     return;
5658         rte_tel_data_start_array(q_data, RTE_TEL_U64_VAL);
5659         for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++)
5660                 rte_tel_data_add_array_u64(q_data, q_stats[q]);
5661         rte_tel_data_add_dict_container(d, stat_name, q_data, 0);
5662 }
5663
5664 #define ADD_DICT_STAT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s)
5665
5666 static int
5667 eth_dev_handle_port_stats(const char *cmd __rte_unused,
5668                 const char *params,
5669                 struct rte_tel_data *d)
5670 {
5671         struct rte_eth_stats stats;
5672         int port_id, ret;
5673
5674         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5675                 return -1;
5676
5677         port_id = atoi(params);
5678         if (!rte_eth_dev_is_valid_port(port_id))
5679                 return -1;
5680
5681         ret = rte_eth_stats_get(port_id, &stats);
5682         if (ret < 0)
5683                 return -1;
5684
5685         rte_tel_data_start_dict(d);
5686         ADD_DICT_STAT(stats, ipackets);
5687         ADD_DICT_STAT(stats, opackets);
5688         ADD_DICT_STAT(stats, ibytes);
5689         ADD_DICT_STAT(stats, obytes);
5690         ADD_DICT_STAT(stats, imissed);
5691         ADD_DICT_STAT(stats, ierrors);
5692         ADD_DICT_STAT(stats, oerrors);
5693         ADD_DICT_STAT(stats, rx_nombuf);
5694         eth_dev_add_port_queue_stats(d, stats.q_ipackets, "q_ipackets");
5695         eth_dev_add_port_queue_stats(d, stats.q_opackets, "q_opackets");
5696         eth_dev_add_port_queue_stats(d, stats.q_ibytes, "q_ibytes");
5697         eth_dev_add_port_queue_stats(d, stats.q_obytes, "q_obytes");
5698         eth_dev_add_port_queue_stats(d, stats.q_errors, "q_errors");
5699
5700         return 0;
5701 }
5702
5703 static int
5704 eth_dev_handle_port_xstats(const char *cmd __rte_unused,
5705                 const char *params,
5706                 struct rte_tel_data *d)
5707 {
5708         struct rte_eth_xstat *eth_xstats;
5709         struct rte_eth_xstat_name *xstat_names;
5710         int port_id, num_xstats;
5711         int i, ret;
5712         char *end_param;
5713
5714         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5715                 return -1;
5716
5717         port_id = strtoul(params, &end_param, 0);
5718         if (*end_param != '\0')
5719                 RTE_ETHDEV_LOG(NOTICE,
5720                         "Extra parameters passed to ethdev telemetry command, ignoring\n");
5721         if (!rte_eth_dev_is_valid_port(port_id))
5722                 return -1;
5723
5724         num_xstats = rte_eth_xstats_get(port_id, NULL, 0);
5725         if (num_xstats < 0)
5726                 return -1;
5727
5728         /* use one malloc for both names and stats */
5729         eth_xstats = malloc((sizeof(struct rte_eth_xstat) +
5730                         sizeof(struct rte_eth_xstat_name)) * num_xstats);
5731         if (eth_xstats == NULL)
5732                 return -1;
5733         xstat_names = (void *)&eth_xstats[num_xstats];
5734
5735         ret = rte_eth_xstats_get_names(port_id, xstat_names, num_xstats);
5736         if (ret < 0 || ret > num_xstats) {
5737                 free(eth_xstats);
5738                 return -1;
5739         }
5740
5741         ret = rte_eth_xstats_get(port_id, eth_xstats, num_xstats);
5742         if (ret < 0 || ret > num_xstats) {
5743                 free(eth_xstats);
5744                 return -1;
5745         }
5746
5747         rte_tel_data_start_dict(d);
5748         for (i = 0; i < num_xstats; i++)
5749                 rte_tel_data_add_dict_u64(d, xstat_names[i].name,
5750                                 eth_xstats[i].value);
5751         return 0;
5752 }
5753
5754 static int
5755 eth_dev_handle_port_link_status(const char *cmd __rte_unused,
5756                 const char *params,
5757                 struct rte_tel_data *d)
5758 {
5759         static const char *status_str = "status";
5760         int ret, port_id;
5761         struct rte_eth_link link;
5762         char *end_param;
5763
5764         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5765                 return -1;
5766
5767         port_id = strtoul(params, &end_param, 0);
5768         if (*end_param != '\0')
5769                 RTE_ETHDEV_LOG(NOTICE,
5770                         "Extra parameters passed to ethdev telemetry command, ignoring\n");
5771         if (!rte_eth_dev_is_valid_port(port_id))
5772                 return -1;
5773
5774         ret = rte_eth_link_get(port_id, &link);
5775         if (ret < 0)
5776                 return -1;
5777
5778         rte_tel_data_start_dict(d);
5779         if (!link.link_status) {
5780                 rte_tel_data_add_dict_string(d, status_str, "DOWN");
5781                 return 0;
5782         }
5783         rte_tel_data_add_dict_string(d, status_str, "UP");
5784         rte_tel_data_add_dict_u64(d, "speed", link.link_speed);
5785         rte_tel_data_add_dict_string(d, "duplex",
5786                         (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
5787                                 "full-duplex" : "half-duplex");
5788         return 0;
5789 }
5790
5791 int
5792 rte_eth_hairpin_queue_peer_update(uint16_t peer_port, uint16_t peer_queue,
5793                                   struct rte_hairpin_peer_info *cur_info,
5794                                   struct rte_hairpin_peer_info *peer_info,
5795                                   uint32_t direction)
5796 {
5797         struct rte_eth_dev *dev;
5798
5799         /* Current queue information is optional; peer info must be provided. */
5800         if (peer_info == NULL)
5801                 return -EINVAL;
5802
5803         /* No need to check the validity again. */
5804         dev = &rte_eth_devices[peer_port];
5805         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_update,
5806                                 -ENOTSUP);
5807
5808         return (*dev->dev_ops->hairpin_queue_peer_update)(dev, peer_queue,
5809                                         cur_info, peer_info, direction);
5810 }
5811
5812 int
5813 rte_eth_hairpin_queue_peer_bind(uint16_t cur_port, uint16_t cur_queue,
5814                                 struct rte_hairpin_peer_info *peer_info,
5815                                 uint32_t direction)
5816 {
5817         struct rte_eth_dev *dev;
5818
5819         if (peer_info == NULL)
5820                 return -EINVAL;
5821
5822         /* No need to check the validity again. */
5823         dev = &rte_eth_devices[cur_port];
5824         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_bind,
5825                                 -ENOTSUP);
5826
5827         return (*dev->dev_ops->hairpin_queue_peer_bind)(dev, cur_queue,
5828                                                         peer_info, direction);
5829 }
5830
5831 int
5832 rte_eth_hairpin_queue_peer_unbind(uint16_t cur_port, uint16_t cur_queue,
5833                                   uint32_t direction)
5834 {
5835         struct rte_eth_dev *dev;
5836
5837         /* No need to check the validity again. */
5838         dev = &rte_eth_devices[cur_port];
5839         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_unbind,
5840                                 -ENOTSUP);
5841
5842         return (*dev->dev_ops->hairpin_queue_peer_unbind)(dev, cur_queue,
5843                                                           direction);
5844 }
5845
5846 RTE_LOG_REGISTER(rte_eth_dev_logtype, lib.ethdev, INFO);
5847
5848 RTE_INIT(ethdev_init_telemetry)
5849 {
5850         rte_telemetry_register_cmd("/ethdev/list", eth_dev_handle_port_list,
5851                         "Returns list of available ethdev ports. Takes no parameters");
5852         rte_telemetry_register_cmd("/ethdev/stats", eth_dev_handle_port_stats,
5853                         "Returns the common stats for a port. Parameters: int port_id");
5854         rte_telemetry_register_cmd("/ethdev/xstats", eth_dev_handle_port_xstats,
5855                         "Returns the extended stats for a port. Parameters: int port_id");
5856         rte_telemetry_register_cmd("/ethdev/link_status",
5857                         eth_dev_handle_port_link_status,
5858                         "Returns the link status for a port. Parameters: int port_id");
5859 }
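
/*
 * Illustrative sketch (not part of the library): querying these commands
 * with the dpdk-telemetry.py client from usertools. The output shape is
 * indicative only.
 *
 *     $ ./usertools/dpdk-telemetry.py
 *     --> /ethdev/list
 *     {"/ethdev/list": [0, 1]}
 *     --> /ethdev/stats,0
 *     {"/ethdev/stats": {"ipackets": 0, "opackets": 0, ...}}
 */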