ethdev: add device flag to bypass auto-filled queue xstats
lib/librte_ethdev/rte_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_kvargs.h>
#include <rte_class.h>
#include <rte_ether.h>
#include <rte_telemetry.h>

#include "rte_ethdev_trace.h"
#include "rte_ethdev.h"
#include "rte_ethdev_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove rx callbacks */
static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove tx callbacks */
static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for shared data allocation */
static rte_spinlock_t rte_eth_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* Store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned int offset;
};

/* Shared memory between primary and secondary processes. */
static struct {
        uint64_t next_owner_id;
        rte_spinlock_t ownership_lock;
        struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
} *rte_eth_dev_shared_data;

static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
        {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
        {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
        {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
        {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
        {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
        {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
        {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
        {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
                rx_nombuf)},
};

#define RTE_NB_STATS RTE_DIM(rte_stats_strings)

static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
        {"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS RTE_DIM(rte_rxq_stats_strings)

static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_opackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS RTE_DIM(rte_txq_stats_strings)

#define RTE_RX_OFFLOAD_BIT2STR(_name)   \
        { DEV_RX_OFFLOAD_##_name, #_name }

#define RTE_ETH_RX_OFFLOAD_BIT2STR(_name)       \
        { RTE_ETH_RX_OFFLOAD_##_name, #_name }

static const struct {
        uint64_t offload;
        const char *name;
} rte_rx_offload_names[] = {
        RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
        RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
        RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
        RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
        RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
        RTE_RX_OFFLOAD_BIT2STR(SCATTER),
        RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
        RTE_RX_OFFLOAD_BIT2STR(SECURITY),
        RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
        RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
        RTE_ETH_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
};

#undef RTE_RX_OFFLOAD_BIT2STR
#undef RTE_ETH_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name)   \
        { DEV_TX_OFFLOAD_##_name, #_name }

static const struct {
        uint64_t offload;
        const char *name;
} rte_tx_offload_names[] = {
        RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
        RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
        RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
        RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
        RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
        RTE_TX_OFFLOAD_BIT2STR(SECURITY),
        RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
};

#undef RTE_TX_OFFLOAD_BIT2STR

/**
 * The user application callback description.
 *
 * It contains the callback address to be registered by the user application,
 * the pointer to the parameters for the callback, and the event type.
 */
struct rte_eth_dev_callback {
        TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
        rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
        void *cb_arg;                           /**< Parameter for callback */
        void *ret_param;                        /**< Return parameter */
        enum rte_eth_event_type event;          /**< Interrupt event type */
        uint32_t active;                        /**< Callback is executing */
};

enum {
        STAT_QMAP_TX = 0,
        STAT_QMAP_RX
};

int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
        int ret;
        struct rte_devargs devargs = {.args = NULL};
        const char *bus_param_key;
        char *bus_str = NULL;
        char *cls_str = NULL;
        int str_size;

        memset(iter, 0, sizeof(*iter));

        /*
         * The devargs string may use various syntaxes:
         *   - 0000:08:00.0,representor=[1-3]
         *   - pci:0000:06:00.0,representor=[0,5]
         *   - class=eth,mac=00:11:22:33:44:55
         * A new syntax is in development (not yet supported):
         *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
         */

        /*
         * Handle the pure class filter (i.e. without any bus-level argument)
         * from the future new syntax.
         * rte_devargs_parse() does not support the new syntax yet,
         * which is why this simple case is parsed here for now.
         */
#define iter_anybus_str "class=eth,"
        if (strncmp(devargs_str, iter_anybus_str,
                        strlen(iter_anybus_str)) == 0) {
                iter->cls_str = devargs_str + strlen(iter_anybus_str);
                goto end;
        }

        /* Split bus, device and parameters. */
        ret = rte_devargs_parse(&devargs, devargs_str);
        if (ret != 0)
                goto error;

        /*
         * Assume parameters of the old syntax can match only at the ethdev
         * level. Extra parameters will be ignored, thanks to the "+" prefix.
         */
        str_size = strlen(devargs.args) + 2;
        cls_str = malloc(str_size);
        if (cls_str == NULL) {
                ret = -ENOMEM;
                goto error;
        }
        ret = snprintf(cls_str, str_size, "+%s", devargs.args);
        if (ret != str_size - 1) {
                ret = -EINVAL;
                goto error;
        }
        iter->cls_str = cls_str;
        free(devargs.args); /* allocated by rte_devargs_parse() */
        devargs.args = NULL;

        iter->bus = devargs.bus;
        if (iter->bus->dev_iterate == NULL) {
                ret = -ENOTSUP;
                goto error;
        }

        /* Convert bus args to the new syntax for use with the new API dev_iterate. */
        if (strcmp(iter->bus->name, "vdev") == 0) {
                bus_param_key = "name";
        } else if (strcmp(iter->bus->name, "pci") == 0) {
                bus_param_key = "addr";
        } else {
                ret = -ENOTSUP;
                goto error;
        }
        str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
        bus_str = malloc(str_size);
        if (bus_str == NULL) {
                ret = -ENOMEM;
                goto error;
        }
        ret = snprintf(bus_str, str_size, "%s=%s",
                        bus_param_key, devargs.name);
        if (ret != str_size - 1) {
                ret = -EINVAL;
                goto error;
        }
        iter->bus_str = bus_str;

end:
        iter->cls = rte_class_find_by_name("eth");
        return 0;

error:
        if (ret == -ENOTSUP)
                RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n",
                                iter->bus->name);
        free(devargs.args);
        free(bus_str);
        free(cls_str);
        return ret;
}

uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
        if (iter->cls == NULL) /* invalid ethdev iterator */
                return RTE_MAX_ETHPORTS;

        do { /* loop to try all matching rte_device */
                /* If not a pure ethdev filter and */
                if (iter->bus != NULL &&
                                /* not in the middle of rte_eth_dev iteration, */
                                iter->class_device == NULL) {
                        /* get the next rte_device to try. */
                        iter->device = iter->bus->dev_iterate(
                                        iter->device, iter->bus_str, iter);
                        if (iter->device == NULL)
                                break; /* no more rte_device candidates */
                }
                /* The device matches the bus part; now check the ethdev part. */
                iter->class_device = iter->cls->dev_iterate(
                                iter->class_device, iter->cls_str, iter);
                if (iter->class_device != NULL)
                        return eth_dev_to_id(iter->class_device); /* match */
        } while (iter->bus != NULL); /* need to try the next rte_device */

        /* No more ethdev ports to iterate. */
        rte_eth_iterator_cleanup(iter);
        return RTE_MAX_ETHPORTS;
}

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
        if (iter->bus_str == NULL)
                return; /* nothing to free in pure class filter */
        free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
        free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
        memset(iter, 0, sizeof(*iter));
}

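/*
 * Usage sketch for the iterator API above (illustrative only, not part of
 * this file's build; the devargs string is an assumption):
 *
 *      struct rte_dev_iterator iter;
 *      uint16_t port_id;
 *
 *      if (rte_eth_iterator_init(&iter, "class=eth,mac=00:11:22:33:44:55") == 0) {
 *              for (port_id = rte_eth_iterator_next(&iter);
 *                   port_id != RTE_MAX_ETHPORTS;
 *                   port_id = rte_eth_iterator_next(&iter))
 *                      printf("matched port %u\n", port_id);
 *              rte_eth_iterator_cleanup(&iter);
 *      }
 *
 * Note that rte_eth_iterator_next() calls rte_eth_iterator_cleanup() itself
 * once iteration is exhausted, so the explicit cleanup matters mainly when
 * breaking out of the loop early (a second cleanup call is a safe no-op).
 */
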
uint16_t
rte_eth_find_next(uint16_t port_id)
{
        while (port_id < RTE_MAX_ETHPORTS &&
                        rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
                port_id++;

        if (port_id >= RTE_MAX_ETHPORTS)
                return RTE_MAX_ETHPORTS;

        return port_id;
}

/*
 * Macro to iterate over all valid ports for internal usage.
 * Note: RTE_ETH_FOREACH_DEV differs in that it filters out owned ports.
 */
#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
        for (port_id = rte_eth_find_next(0); \
             port_id < RTE_MAX_ETHPORTS; \
             port_id = rte_eth_find_next(port_id + 1))

uint16_t
rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
{
        port_id = rte_eth_find_next(port_id);
        while (port_id < RTE_MAX_ETHPORTS &&
                        rte_eth_devices[port_id].device != parent)
                port_id = rte_eth_find_next(port_id + 1);

        return port_id;
}

uint16_t
rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
        return rte_eth_find_next_of(port_id,
                        rte_eth_devices[ref_port_id].device);
}

static void
rte_eth_dev_shared_data_prepare(void)
{
        const unsigned int flags = 0;
        const struct rte_memzone *mz;

        rte_spinlock_lock(&rte_eth_shared_data_lock);

        if (rte_eth_dev_shared_data == NULL) {
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        /* Allocate port data and ownership shared memory. */
                        mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
                                        sizeof(*rte_eth_dev_shared_data),
                                        rte_socket_id(), flags);
                } else
                        mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
                if (mz == NULL)
                        rte_panic("Cannot allocate ethdev shared data\n");

                rte_eth_dev_shared_data = mz->addr;
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        rte_eth_dev_shared_data->next_owner_id =
                                        RTE_ETH_DEV_NO_OWNER + 1;
                        rte_spinlock_init(&rte_eth_dev_shared_data->ownership_lock);
                        memset(rte_eth_dev_shared_data->data, 0,
                               sizeof(rte_eth_dev_shared_data->data));
                }
        }

        rte_spinlock_unlock(&rte_eth_shared_data_lock);
}

static bool
is_allocated(const struct rte_eth_dev *ethdev)
{
        return ethdev->data->name[0] != '\0';
}

static struct rte_eth_dev *
_rte_eth_dev_allocated(const char *name)
{
        unsigned int i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (rte_eth_devices[i].data != NULL &&
                    strcmp(rte_eth_devices[i].data->name, name) == 0)
                        return &rte_eth_devices[i];
        }
        return NULL;
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
        struct rte_eth_dev *ethdev;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        ethdev = _rte_eth_dev_allocated(name);

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

        return ethdev;
}

static uint16_t
rte_eth_dev_find_free_port(void)
{
        unsigned int i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                /* Use the shared name field to find a free port. */
                if (rte_eth_dev_shared_data->data[i].name[0] == '\0') {
                        RTE_ASSERT(rte_eth_devices[i].state ==
                                   RTE_ETH_DEV_UNUSED);
                        return i;
                }
        }
        return RTE_MAX_ETHPORTS;
}

static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
{
        struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

        eth_dev->data = &rte_eth_dev_shared_data->data[port_id];

        return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
        uint16_t port_id;
        struct rte_eth_dev *eth_dev = NULL;
        size_t name_len;

        name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
        if (name_len == 0) {
                RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n");
                return NULL;
        }

        if (name_len >= RTE_ETH_NAME_MAX_LEN) {
                RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n");
                return NULL;
        }

        rte_eth_dev_shared_data_prepare();

        /* Synchronize port creation between primary and secondary processes. */
        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        if (_rte_eth_dev_allocated(name) != NULL) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethernet device with name %s already allocated\n",
                        name);
                goto unlock;
        }

        port_id = rte_eth_dev_find_free_port();
        if (port_id == RTE_MAX_ETHPORTS) {
                RTE_ETHDEV_LOG(ERR,
                        "Reached maximum number of Ethernet ports\n");
                goto unlock;
        }

        eth_dev = eth_dev_get(port_id);
        strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
        eth_dev->data->port_id = port_id;
        eth_dev->data->mtu = RTE_ETHER_MTU;
        pthread_mutex_init(&eth_dev->data->flow_ops_mutex, NULL);

unlock:
        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

        return eth_dev;
}

/*
 * Attach to a port already registered by the primary process, which
 * ensures that the same device gets the same port ID in both the
 * primary and secondary processes.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
        uint16_t i;
        struct rte_eth_dev *eth_dev = NULL;

        rte_eth_dev_shared_data_prepare();

        /* Synchronize port attachment with primary port creation and release. */
        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (strcmp(rte_eth_dev_shared_data->data[i].name, name) == 0)
                        break;
        }
        if (i == RTE_MAX_ETHPORTS) {
                RTE_ETHDEV_LOG(ERR,
                        "Device %s is not driven by the primary process\n",
                        name);
        } else {
                eth_dev = eth_dev_get(i);
                RTE_ASSERT(eth_dev->data->port_id == i);
        }

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return eth_dev;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
        if (eth_dev == NULL)
                return -EINVAL;

        rte_eth_dev_shared_data_prepare();

        if (eth_dev->state != RTE_ETH_DEV_UNUSED)
                rte_eth_dev_callback_process(eth_dev,
                                RTE_ETH_EVENT_DESTROY, NULL);

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        eth_dev->state = RTE_ETH_DEV_UNUSED;
        eth_dev->device = NULL;
        eth_dev->process_private = NULL;
        eth_dev->intr_handle = NULL;
        eth_dev->rx_pkt_burst = NULL;
        eth_dev->tx_pkt_burst = NULL;
        eth_dev->tx_pkt_prepare = NULL;
        eth_dev->rx_queue_count = NULL;
        eth_dev->rx_descriptor_done = NULL;
        eth_dev->rx_descriptor_status = NULL;
        eth_dev->tx_descriptor_status = NULL;
        eth_dev->dev_ops = NULL;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                rte_free(eth_dev->data->rx_queues);
                rte_free(eth_dev->data->tx_queues);
                rte_free(eth_dev->data->mac_addrs);
                rte_free(eth_dev->data->hash_mac_addrs);
                rte_free(eth_dev->data->dev_private);
                pthread_mutex_destroy(&eth_dev->data->flow_ops_mutex);
                memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
        }

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

        return 0;
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
        if (port_id >= RTE_MAX_ETHPORTS ||
            (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
                return 0;
        else
                return 1;
}

static int
rte_eth_is_valid_owner_id(uint64_t owner_id)
{
        if (owner_id == RTE_ETH_DEV_NO_OWNER ||
            rte_eth_dev_shared_data->next_owner_id <= owner_id)
                return 0;
        return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
        port_id = rte_eth_find_next(port_id);
        while (port_id < RTE_MAX_ETHPORTS &&
                        rte_eth_devices[port_id].data->owner.id != owner_id)
                port_id = rte_eth_find_next(port_id + 1);

        return port_id;
}

int
rte_eth_dev_owner_new(uint64_t *owner_id)
{
        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        *owner_id = rte_eth_dev_shared_data->next_owner_id++;

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return 0;
}

static int
_rte_eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
                       const struct rte_eth_dev_owner *new_owner)
{
        struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
        struct rte_eth_dev_owner *port_owner;

        if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
                RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
                        port_id);
                return -ENODEV;
        }

        if (!rte_eth_is_valid_owner_id(new_owner->id) &&
            !rte_eth_is_valid_owner_id(old_owner_id)) {
                RTE_ETHDEV_LOG(ERR,
                        "Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
                       old_owner_id, new_owner->id);
                return -EINVAL;
        }

        port_owner = &rte_eth_devices[port_id].data->owner;
        if (port_owner->id != old_owner_id) {
                RTE_ETHDEV_LOG(ERR,
                        "Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
                        port_id, port_owner->name, port_owner->id);
                return -EPERM;
        }

        /* can not truncate (same structure) */
        strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);

        port_owner->id = new_owner->id;

        RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
                port_id, new_owner->name, new_owner->id);

        return 0;
}

int
rte_eth_dev_owner_set(const uint16_t port_id,
                      const struct rte_eth_dev_owner *owner)
{
        int ret;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        ret = _rte_eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return ret;
}

int
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
        const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
                        {.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
        int ret;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        ret = _rte_eth_dev_owner_set(port_id, owner_id, &new_owner);

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return ret;
}

int
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
        uint16_t port_id;
        int ret = 0;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        if (rte_eth_is_valid_owner_id(owner_id)) {
                for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
                        if (rte_eth_devices[port_id].data->owner.id == owner_id)
                                memset(&rte_eth_devices[port_id].data->owner, 0,
                                       sizeof(struct rte_eth_dev_owner));
                RTE_ETHDEV_LOG(NOTICE,
                        "All port owners owned by %016"PRIx64" identifier have been removed\n",
                        owner_id);
        } else {
                RTE_ETHDEV_LOG(ERR,
                               "Invalid owner id=%016"PRIx64"\n",
                               owner_id);
                ret = -EINVAL;
        }

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

        return ret;
}

int
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
        int ret = 0;
        struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
                RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
                        port_id);
                ret = -ENODEV;
        } else {
                rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
        }

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return ret;
}

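/*
 * Usage sketch for the ownership API above (illustrative only; the owner
 * name and port number are assumptions). A component takes a unique owner
 * id once, then claims ports so that other components skip them:
 *
 *      struct rte_eth_dev_owner owner = { .name = "my_app" };
 *      uint16_t port_id = 0;
 *
 *      if (rte_eth_dev_owner_new(&owner.id) == 0 &&
 *          rte_eth_dev_owner_set(port_id, &owner) == 0) {
 *              ... use the port exclusively ...
 *              rte_eth_dev_owner_unset(port_id, owner.id);
 *      }
 *
 * RTE_ETH_FOREACH_DEV() skips owned ports, so iteration over unowned ports
 * and exclusive use of owned ports do not collide.
 */
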
int
rte_eth_dev_socket_id(uint16_t port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
        return rte_eth_devices[port_id].data->numa_node;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
        return rte_eth_devices[port_id].security_ctx;
}

uint16_t
rte_eth_dev_count_avail(void)
{
        uint16_t p;
        uint16_t count;

        count = 0;

        RTE_ETH_FOREACH_DEV(p)
                count++;

        return count;
}

uint16_t
rte_eth_dev_count_total(void)
{
        uint16_t port, count = 0;

        RTE_ETH_FOREACH_VALID_DEV(port)
                count++;

        return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
        char *tmp;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        if (name == NULL) {
                RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
                return -EINVAL;
        }

        /* Don't check 'rte_eth_devices[i].data' here,
         * because it might be overwritten by a vdev PMD. */
        tmp = rte_eth_dev_shared_data->data[port_id].name;
        strcpy(name, tmp);
        return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
        uint32_t pid;

        if (name == NULL) {
                RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
                return -EINVAL;
        }

        RTE_ETH_FOREACH_VALID_DEV(pid)
                if (!strcmp(name, rte_eth_dev_shared_data->data[pid].name)) {
                        *port_id = pid;
                        return 0;
                }

        return -ENODEV;
}

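/*
 * Lookup sketch for the two functions above (illustrative only; "net_tap0"
 * is an assumed device name):
 *
 *      uint16_t port_id;
 *      char name[RTE_ETH_NAME_MAX_LEN];
 *
 *      if (rte_eth_dev_get_port_by_name("net_tap0", &port_id) == 0) {
 *              rte_eth_dev_get_name_by_port(port_id, name);
 *              -- name now round-trips to "net_tap0" --
 *      }
 */
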
static int
eth_err(uint16_t port_id, int ret)
{
        if (ret == 0)
                return 0;
        if (rte_eth_dev_is_removed(port_id))
                return -EIO;
        return ret;
}

static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_rx_queues;
        void **rxq;
        unsigned int i;

        if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
                                sizeof(dev->data->rx_queues[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (dev->data->rx_queues == NULL) {
                        dev->data->nb_rx_queues = 0;
                        return -(ENOMEM);
                }
        } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);
                rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (rxq == NULL)
                        return -(ENOMEM);
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(rxq + old_nb_queues, 0,
                                sizeof(rxq[0]) * new_qs);
                }

                dev->data->rx_queues = rxq;

        } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);

                rte_free(dev->data->rx_queues);
                dev->data->rx_queues = NULL;
        }
        dev->data->nb_rx_queues = nb_queues;
        return 0;
}

static int
eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        uint16_t port_id;

        if (rx_queue_id >= dev->data->nb_rx_queues) {
                port_id = dev->data->port_id;
                RTE_ETHDEV_LOG(ERR,
                               "Invalid Rx queue_id=%u of device with port_id=%u\n",
                               rx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->rx_queues[rx_queue_id] == NULL) {
                port_id = dev->data->port_id;
                RTE_ETHDEV_LOG(ERR,
                               "Queue %u of device with port_id=%u has not been setup\n",
                               rx_queue_id, port_id);
                return -EINVAL;
        }

        return 0;
}

static int
eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
        uint16_t port_id;

        if (tx_queue_id >= dev->data->nb_tx_queues) {
                port_id = dev->data->port_id;
                RTE_ETHDEV_LOG(ERR,
                               "Invalid Tx queue_id=%u of device with port_id=%u\n",
                               tx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->tx_queues[tx_queue_id] == NULL) {
                port_id = dev->data->port_id;
                RTE_ETHDEV_LOG(ERR,
                               "Queue %u of device with port_id=%u has not been setup\n",
                               tx_queue_id, port_id);
                return -EINVAL;
        }

        return 0;
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (!dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be started before starting any queue\n",
                        port_id);
                return -EINVAL;
        }

        ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
        if (ret != 0)
                return ret;

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

        if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        rx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->rx_queue_start(dev,
                                                             rx_queue_id));
}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
        if (ret != 0)
                return ret;

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

        if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        rx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
}

int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (!dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be started before starting any queue\n",
                        port_id);
                return -EINVAL;
        }

        ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
        if (ret != 0)
                return ret;

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

        if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        tx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
        if (ret != 0)
                return ret;

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

        if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        tx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
}

static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_tx_queues;
        void **txq;
        unsigned int i;

        if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
                                                   sizeof(dev->data->tx_queues[0]) * nb_queues,
                                                   RTE_CACHE_LINE_SIZE);
                if (dev->data->tx_queues == NULL) {
                        dev->data->nb_tx_queues = 0;
                        return -(ENOMEM);
                }
        } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);
                txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
                                  RTE_CACHE_LINE_SIZE);
                if (txq == NULL)
                        return -ENOMEM;
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(txq + old_nb_queues, 0,
                               sizeof(txq[0]) * new_qs);
                }

                dev->data->tx_queues = txq;

        } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);

                rte_free(dev->data->tx_queues);
                dev->data->tx_queues = NULL;
        }
        dev->data->nb_tx_queues = nb_queues;
        return 0;
}

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
        switch (speed) {
        case ETH_SPEED_NUM_10M:
                return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
        case ETH_SPEED_NUM_100M:
                return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
        case ETH_SPEED_NUM_1G:
                return ETH_LINK_SPEED_1G;
        case ETH_SPEED_NUM_2_5G:
                return ETH_LINK_SPEED_2_5G;
        case ETH_SPEED_NUM_5G:
                return ETH_LINK_SPEED_5G;
        case ETH_SPEED_NUM_10G:
                return ETH_LINK_SPEED_10G;
        case ETH_SPEED_NUM_20G:
                return ETH_LINK_SPEED_20G;
        case ETH_SPEED_NUM_25G:
                return ETH_LINK_SPEED_25G;
        case ETH_SPEED_NUM_40G:
                return ETH_LINK_SPEED_40G;
        case ETH_SPEED_NUM_50G:
                return ETH_LINK_SPEED_50G;
        case ETH_SPEED_NUM_56G:
                return ETH_LINK_SPEED_56G;
        case ETH_SPEED_NUM_100G:
                return ETH_LINK_SPEED_100G;
        case ETH_SPEED_NUM_200G:
                return ETH_LINK_SPEED_200G;
        default:
                return 0;
        }
}

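/*
 * Usage sketch (illustrative only): forcing a fixed 10G full-duplex link
 * through rte_eth_conf, assuming the PMD supports fixed speeds:
 *
 *      struct rte_eth_conf conf = { 0 };
 *
 *      conf.link_speeds = ETH_LINK_SPEED_FIXED |
 *                         rte_eth_speed_bitflag(ETH_SPEED_NUM_10G, 1);
 *
 * A return value of 0 from rte_eth_speed_bitflag() means the speed/duplex
 * pair is unknown and should not be programmed.
 */
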
const char *
rte_eth_dev_rx_offload_name(uint64_t offload)
{
        const char *name = "UNKNOWN";
        unsigned int i;

        for (i = 0; i < RTE_DIM(rte_rx_offload_names); ++i) {
                if (offload == rte_rx_offload_names[i].offload) {
                        name = rte_rx_offload_names[i].name;
                        break;
                }
        }

        return name;
}

const char *
rte_eth_dev_tx_offload_name(uint64_t offload)
{
        const char *name = "UNKNOWN";
        unsigned int i;

        for (i = 0; i < RTE_DIM(rte_tx_offload_names); ++i) {
                if (offload == rte_tx_offload_names[i].offload) {
                        name = rte_tx_offload_names[i].name;
                        break;
                }
        }

        return name;
}

static inline int
check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
                   uint32_t max_rx_pkt_len, uint32_t dev_info_size)
{
        int ret = 0;

        if (dev_info_size == 0) {
                if (config_size != max_rx_pkt_len) {
                        RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
                                       " %u != %u is not allowed\n",
                                       port_id, config_size, max_rx_pkt_len);
                        ret = -EINVAL;
                }
        } else if (config_size > dev_info_size) {
                RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
                               "> max allowed value %u\n", port_id, config_size,
                               dev_info_size);
                ret = -EINVAL;
        } else if (config_size < RTE_ETHER_MIN_LEN) {
                RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
                               "< min allowed value %u\n", port_id, config_size,
                               (unsigned int)RTE_ETHER_MIN_LEN);
                ret = -EINVAL;
        }
        return ret;
}

/*
 * Validate the offloads requested through rte_eth_dev_configure() against
 * the offloads actually set by the Ethernet device.
 *
 * @param port_id
 *   The port identifier of the Ethernet device.
 * @param req_offloads
 *   The offloads that have been requested through rte_eth_dev_configure().
 * @param set_offloads
 *   The offloads successfully set by the Ethernet device.
 * @param offload_type
 *   The offload type, i.e. the "Rx" or "Tx" string.
 * @param offload_name
 *   The function that prints the offload name.
 * @return
 *   - (0) if validation is successful.
 *   - (-EINVAL) if a requested offload has been silently disabled.
 */
static int
validate_offloads(uint16_t port_id, uint64_t req_offloads,
                  uint64_t set_offloads, const char *offload_type,
                  const char *(*offload_name)(uint64_t))
{
        uint64_t offloads_diff = req_offloads ^ set_offloads;
        uint64_t offload;
        int ret = 0;

        while (offloads_diff != 0) {
                /* Check if any offload is requested but not enabled. */
                offload = 1ULL << __builtin_ctzll(offloads_diff);
                if (offload & req_offloads) {
                        RTE_ETHDEV_LOG(ERR,
                                "Port %u failed to enable %s offload %s\n",
                                port_id, offload_type, offload_name(offload));
                        ret = -EINVAL;
                }

                /* Check if offload couldn't be disabled. */
                if (offload & set_offloads) {
                        RTE_ETHDEV_LOG(DEBUG,
                                "Port %u %s offload %s is not requested but enabled\n",
                                port_id, offload_type, offload_name(offload));
                }

                offloads_diff &= ~offload;
        }

        return ret;
}

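/*
 * Worked example of the bit-peeling loop above: __builtin_ctzll(x) returns
 * the index of the lowest set bit of x (undefined for x == 0, hence the
 * `while (offloads_diff != 0)` guard), so `1ULL << __builtin_ctzll(x)`
 * isolates that bit and `offloads_diff &= ~offload` clears it. For
 * offloads_diff = 0x6 (binary 0110) the iterations visit 0x2 then 0x4,
 * classifying each differing offload bit exactly once.
 */
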
1286 int
1287 rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
1288                       const struct rte_eth_conf *dev_conf)
1289 {
1290         struct rte_eth_dev *dev;
1291         struct rte_eth_dev_info dev_info;
1292         struct rte_eth_conf orig_conf;
1293         int diag;
1294         int ret;
1295
1296         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1297
1298         dev = &rte_eth_devices[port_id];
1299
1300         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
1301
1302         if (dev->data->dev_started) {
1303                 RTE_ETHDEV_LOG(ERR,
1304                         "Port %u must be stopped to allow configuration\n",
1305                         port_id);
1306                 return -EBUSY;
1307         }
1308
1309          /* Store original config, as rollback required on failure */
1310         memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));
1311
1312         /*
1313          * Copy the dev_conf parameter into the dev structure.
1314          * rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get
1315          */
1316         if (dev_conf != &dev->data->dev_conf)
1317                 memcpy(&dev->data->dev_conf, dev_conf,
1318                        sizeof(dev->data->dev_conf));
1319
1320         ret = rte_eth_dev_info_get(port_id, &dev_info);
1321         if (ret != 0)
1322                 goto rollback;
1323
1324         /* If number of queues specified by application for both Rx and Tx is
1325          * zero, use driver preferred values. This cannot be done individually
1326          * as it is valid for either Tx or Rx (but not both) to be zero.
1327          * If driver does not provide any preferred valued, fall back on
1328          * EAL defaults.
1329          */
1330         if (nb_rx_q == 0 && nb_tx_q == 0) {
1331                 nb_rx_q = dev_info.default_rxportconf.nb_queues;
1332                 if (nb_rx_q == 0)
1333                         nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
1334                 nb_tx_q = dev_info.default_txportconf.nb_queues;
1335                 if (nb_tx_q == 0)
1336                         nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
1337         }
1338
1339         if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
1340                 RTE_ETHDEV_LOG(ERR,
1341                         "Number of RX queues requested (%u) is greater than max supported(%d)\n",
1342                         nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
1343                 ret = -EINVAL;
1344                 goto rollback;
1345         }
1346
1347         if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
1348                 RTE_ETHDEV_LOG(ERR,
1349                         "Number of TX queues requested (%u) is greater than max supported(%d)\n",
1350                         nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
1351                 ret = -EINVAL;
1352                 goto rollback;
1353         }
1354
1355         /*
1356          * Check that the numbers of RX and TX queues are not greater
1357          * than the maximum number of RX and TX queues supported by the
1358          * configured device.
1359          */
1360         if (nb_rx_q > dev_info.max_rx_queues) {
1361                 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
1362                         port_id, nb_rx_q, dev_info.max_rx_queues);
1363                 ret = -EINVAL;
1364                 goto rollback;
1365         }
1366
1367         if (nb_tx_q > dev_info.max_tx_queues) {
1368                 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
1369                         port_id, nb_tx_q, dev_info.max_tx_queues);
1370                 ret = -EINVAL;
1371                 goto rollback;
1372         }
1373
1374         /* Check that the device supports requested interrupts */
1375         if ((dev_conf->intr_conf.lsc == 1) &&
1376                         (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
1377                 RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
1378                         dev->device->driver->name);
1379                 ret = -EINVAL;
1380                 goto rollback;
1381         }
1382         if ((dev_conf->intr_conf.rmv == 1) &&
1383                         (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
1384                 RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
1385                         dev->device->driver->name);
1386                 ret = -EINVAL;
1387                 goto rollback;
1388         }
1389
1390         /*
1391          * If jumbo frames are enabled, check that the maximum RX packet
1392          * length is supported by the configured device.
1393          */
1394         if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
1395                 if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) {
1396                         RTE_ETHDEV_LOG(ERR,
1397                                 "Ethdev port_id=%u max_rx_pkt_len %u > max valid value %u\n",
1398                                 port_id, dev_conf->rxmode.max_rx_pkt_len,
1399                                 dev_info.max_rx_pktlen);
1400                         ret = -EINVAL;
1401                         goto rollback;
1402                 } else if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN) {
1403                         RTE_ETHDEV_LOG(ERR,
1404                                 "Ethdev port_id=%u max_rx_pkt_len %u < min valid value %u\n",
1405                                 port_id, dev_conf->rxmode.max_rx_pkt_len,
1406                                 (unsigned int)RTE_ETHER_MIN_LEN);
1407                         ret = -EINVAL;
1408                         goto rollback;
1409                 }
1410         } else {
1411                 if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN ||
1412                         dev_conf->rxmode.max_rx_pkt_len > RTE_ETHER_MAX_LEN)
1413                         /* Use default value */
1414                         dev->data->dev_conf.rxmode.max_rx_pkt_len =
1415                                                         RTE_ETHER_MAX_LEN;
1416         }
1417
1418         /*
1419          * If LRO is enabled, check that the maximum aggregated packet
1420          * size is supported by the configured device.
1421          */
1422         if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
1423                 if (dev_conf->rxmode.max_lro_pkt_size == 0)
1424                         dev->data->dev_conf.rxmode.max_lro_pkt_size =
1425                                 dev->data->dev_conf.rxmode.max_rx_pkt_len;
1426                 ret = check_lro_pkt_size(port_id,
1427                                 dev->data->dev_conf.rxmode.max_lro_pkt_size,
1428                                 dev->data->dev_conf.rxmode.max_rx_pkt_len,
1429                                 dev_info.max_lro_pkt_size);
1430                 if (ret != 0)
1431                         goto rollback;
1432         }
1433
1434         /* Any requested offloading must be within its device capabilities */
1435         if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
1436              dev_conf->rxmode.offloads) {
1437                 RTE_ETHDEV_LOG(ERR,
1438                         "Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
1439                         "capabilities 0x%"PRIx64" in %s()\n",
1440                         port_id, dev_conf->rxmode.offloads,
1441                         dev_info.rx_offload_capa,
1442                         __func__);
1443                 ret = -EINVAL;
1444                 goto rollback;
1445         }
1446         if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
1447              dev_conf->txmode.offloads) {
1448                 RTE_ETHDEV_LOG(ERR,
1449                         "Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
1450                         "capabilities 0x%"PRIx64" in %s()\n",
1451                         port_id, dev_conf->txmode.offloads,
1452                         dev_info.tx_offload_capa,
1453                         __func__);
1454                 ret = -EINVAL;
1455                 goto rollback;
1456         }
1457
1458         dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
1459                 rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);
1460
1461         /* Check that device supports requested rss hash functions. */
1462         if ((dev_info.flow_type_rss_offloads |
1463              dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
1464             dev_info.flow_type_rss_offloads) {
1465                 RTE_ETHDEV_LOG(ERR,
1466                         "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
1467                         port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
1468                         dev_info.flow_type_rss_offloads);
1469                 ret = -EINVAL;
1470                 goto rollback;
1471         }
1472
1473         /* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
1474         if (((dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) == 0) &&
1475             (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
1476                 RTE_ETHDEV_LOG(ERR,
1477                         "Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
1478                         port_id,
1479                         rte_eth_dev_rx_offload_name(DEV_RX_OFFLOAD_RSS_HASH));
1480                 ret = -EINVAL;
1481                 goto rollback;
1482         }
1483
1484         /*
1485          * Setup new number of RX/TX queues and reconfigure device.
1486          */
1487         diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
1488         if (diag != 0) {
1489                 RTE_ETHDEV_LOG(ERR,
1490                         "Port%u rte_eth_dev_rx_queue_config = %d\n",
1491                         port_id, diag);
1492                 ret = diag;
1493                 goto rollback;
1494         }
1495
1496         diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
1497         if (diag != 0) {
1498                 RTE_ETHDEV_LOG(ERR,
1499                         "Port%u rte_eth_dev_tx_queue_config = %d\n",
1500                         port_id, diag);
1501                 rte_eth_dev_rx_queue_config(dev, 0);
1502                 ret = diag;
1503                 goto rollback;
1504         }
1505
1506         diag = (*dev->dev_ops->dev_configure)(dev);
1507         if (diag != 0) {
1508                 RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
1509                         port_id, diag);
1510                 ret = eth_err(port_id, diag);
1511                 goto reset_queues;
1512         }
1513
1514         /* Initialize Rx profiling if enabled at compilation time. */
1515         diag = __rte_eth_dev_profile_init(port_id, dev);
1516         if (diag != 0) {
1517                 RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
1518                         port_id, diag);
1519                 ret = eth_err(port_id, diag);
1520                 goto reset_queues;
1521         }
1522
1523         /* Validate Rx offloads. */
1524         diag = validate_offloads(port_id,
1525                         dev_conf->rxmode.offloads,
1526                         dev->data->dev_conf.rxmode.offloads, "Rx",
1527                         rte_eth_dev_rx_offload_name);
1528         if (diag != 0) {
1529                 ret = diag;
1530                 goto reset_queues;
1531         }
1532
1533         /* Validate Tx offloads. */
1534         diag = validate_offloads(port_id,
1535                         dev_conf->txmode.offloads,
1536                         dev->data->dev_conf.txmode.offloads, "Tx",
1537                         rte_eth_dev_tx_offload_name);
1538         if (diag != 0) {
1539                 ret = diag;
1540                 goto reset_queues;
1541         }
1542
1543         rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0);
1544         return 0;
1545 reset_queues:
1546         rte_eth_dev_rx_queue_config(dev, 0);
1547         rte_eth_dev_tx_queue_config(dev, 0);
1548 rollback:
1549         memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
1550
1551         rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret);
1552         return ret;
1553 }
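
/*
 * Illustrative sketch (editor's addition, not part of this file): a minimal
 * application-side use of rte_eth_dev_configure(). "port_id" is assumed to
 * refer to a probed port. On any validation failure above, the original
 * configuration is restored before returning, so the caller only needs to
 * check the return code.
 *
 *	struct rte_eth_conf port_conf;
 *
 *	memset(&port_conf, 0, sizeof(port_conf));
 *	port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
 *	port_conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP;
 *	if (rte_eth_dev_configure(port_id, 1, 1, &port_conf) < 0)
 *		rte_exit(EXIT_FAILURE, "cannot configure port %u\n", port_id);
 */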
1554
1555 void
1556 rte_eth_dev_internal_reset(struct rte_eth_dev *dev)
1557 {
1558         if (dev->data->dev_started) {
1559                 RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
1560                         dev->data->port_id);
1561                 return;
1562         }
1563
1564         rte_eth_dev_rx_queue_config(dev, 0);
1565         rte_eth_dev_tx_queue_config(dev, 0);
1566
1567         memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
1568 }
1569
1570 static void
1571 rte_eth_dev_mac_restore(struct rte_eth_dev *dev,
1572                         struct rte_eth_dev_info *dev_info)
1573 {
1574         struct rte_ether_addr *addr;
1575         uint16_t i;
1576         uint32_t pool = 0;
1577         uint64_t pool_mask;
1578
1579         /* replay MAC address configuration including default MAC */
1580         addr = &dev->data->mac_addrs[0];
1581         if (*dev->dev_ops->mac_addr_set != NULL)
1582                 (*dev->dev_ops->mac_addr_set)(dev, addr);
1583         else if (*dev->dev_ops->mac_addr_add != NULL)
1584                 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);
1585
1586         if (*dev->dev_ops->mac_addr_add != NULL) {
1587                 for (i = 1; i < dev_info->max_mac_addrs; i++) {
1588                         addr = &dev->data->mac_addrs[i];
1589
1590                         /* skip zero address */
1591                         if (rte_is_zero_ether_addr(addr))
1592                                 continue;
1593
1594                         pool = 0;
1595                         pool_mask = dev->data->mac_pool_sel[i];
1596
1597                         do {
1598                                 if (pool_mask & 1ULL)
1599                                         (*dev->dev_ops->mac_addr_add)(dev,
1600                                                 addr, i, pool);
1601                                 pool_mask >>= 1;
1602                                 pool++;
1603                         } while (pool_mask);
1604                 }
1605         }
1606 }
1607
1608 static int
1609 rte_eth_dev_config_restore(struct rte_eth_dev *dev,
1610                            struct rte_eth_dev_info *dev_info, uint16_t port_id)
1611 {
1612         int ret;
1613
1614         if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
1615                 rte_eth_dev_mac_restore(dev, dev_info);
1616
1617         /* replay promiscuous configuration */
1618         /*
1619          * Replay the promiscuous configuration. Call the driver op
1620          * directly: port_id is already validated and the replay must
1621          * bypass the "value already set" short-circuit of the public API.
1622          */
1623             *dev->dev_ops->promiscuous_enable != NULL) {
1624                 ret = eth_err(port_id,
1625                               (*dev->dev_ops->promiscuous_enable)(dev));
1626                 if (ret != 0 && ret != -ENOTSUP) {
1627                         RTE_ETHDEV_LOG(ERR,
1628                                 "Failed to enable promiscuous mode for device (port %u): %s\n",
1629                                 port_id, rte_strerror(-ret));
1630                         return ret;
1631                 }
1632         } else if (rte_eth_promiscuous_get(port_id) == 0 &&
1633                    *dev->dev_ops->promiscuous_disable != NULL) {
1634                 ret = eth_err(port_id,
1635                               (*dev->dev_ops->promiscuous_disable)(dev));
1636                 if (ret != 0 && ret != -ENOTSUP) {
1637                         RTE_ETHDEV_LOG(ERR,
1638                                 "Failed to disable promiscuous mode for device (port %u): %s\n",
1639                                 port_id, rte_strerror(-ret));
1640                         return ret;
1641                 }
1642         }
1643
1644         /*
1645          * Replay the all-multicast configuration. As above, call the
1646          * driver op directly to bypass the port_id check and the
1647          * "value already set" short-circuit of the public API.
1648          */
1649         if (rte_eth_allmulticast_get(port_id) == 1 &&
1650             *dev->dev_ops->allmulticast_enable != NULL) {
1651                 ret = eth_err(port_id,
1652                               (*dev->dev_ops->allmulticast_enable)(dev));
1653                 if (ret != 0 && ret != -ENOTSUP) {
1654                         RTE_ETHDEV_LOG(ERR,
1655                                 "Failed to enable allmulticast mode for device (port %u): %s\n",
1656                                 port_id, rte_strerror(-ret));
1657                         return ret;
1658                 }
1659         } else if (rte_eth_allmulticast_get(port_id) == 0 &&
1660                    *dev->dev_ops->allmulticast_disable != NULL) {
1661                 ret = eth_err(port_id,
1662                               (*dev->dev_ops->allmulticast_disable)(dev));
1663                 if (ret != 0 && ret != -ENOTSUP) {
1664                         RTE_ETHDEV_LOG(ERR,
1665                                 "Failed to disable allmulticast mode for device (port %u): %s\n",
1666                                 port_id, rte_strerror(-ret));
1667                         return ret;
1668                 }
1669         }
1670
1671         return 0;
1672 }
1673
1674 int
1675 rte_eth_dev_start(uint16_t port_id)
1676 {
1677         struct rte_eth_dev *dev;
1678         struct rte_eth_dev_info dev_info;
1679         int diag;
1680         int ret, ret_stop;
1681
1682         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1683
1684         dev = &rte_eth_devices[port_id];
1685
1686         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1687
1688         if (dev->data->dev_started != 0) {
1689                 RTE_ETHDEV_LOG(INFO,
1690                         "Device with port_id=%"PRIu16" already started\n",
1691                         port_id);
1692                 return 0;
1693         }
1694
1695         ret = rte_eth_dev_info_get(port_id, &dev_info);
1696         if (ret != 0)
1697                 return ret;
1698
1699         /* Restore the MAC address now if the device does not support live change */
1700         if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
1701                 rte_eth_dev_mac_restore(dev, &dev_info);
1702
1703         diag = (*dev->dev_ops->dev_start)(dev);
1704         if (diag == 0)
1705                 dev->data->dev_started = 1;
1706         else
1707                 return eth_err(port_id, diag);
1708
1709         ret = rte_eth_dev_config_restore(dev, &dev_info, port_id);
1710         if (ret != 0) {
1711                 RTE_ETHDEV_LOG(ERR,
1712                         "Error during restoring configuration for device (port %u): %s\n",
1713                         port_id, rte_strerror(-ret));
1714                 ret_stop = rte_eth_dev_stop(port_id);
1715                 if (ret_stop != 0) {
1716                         RTE_ETHDEV_LOG(ERR,
1717                                 "Failed to stop device (port %u): %s\n",
1718                                 port_id, rte_strerror(-ret_stop));
1719                 }
1720
1721                 return ret;
1722         }
1723
1724         if (dev->data->dev_conf.intr_conf.lsc == 0) {
1725                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
1726                 (*dev->dev_ops->link_update)(dev, 0);
1727         }
1728
1729         rte_ethdev_trace_start(port_id);
1730         return 0;
1731 }
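
/*
 * Illustrative sketch (editor's addition): starting a configured port.
 * Promiscuous/allmulticast and MAC settings made before this call are
 * replayed by rte_eth_dev_config_restore() above, and the port is stopped
 * again if that replay fails.
 *
 *	int ret = rte_eth_dev_start(port_id);
 *	if (ret < 0)
 *		rte_exit(EXIT_FAILURE, "rte_eth_dev_start: %s\n",
 *			 rte_strerror(-ret));
 */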
1732
1733 int
1734 rte_eth_dev_stop(uint16_t port_id)
1735 {
1736         struct rte_eth_dev *dev;
1737         int ret;
1738
1739         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1740         dev = &rte_eth_devices[port_id];
1741
1742         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_stop, -ENOTSUP);
1743
1744         if (dev->data->dev_started == 0) {
1745                 RTE_ETHDEV_LOG(INFO,
1746                         "Device with port_id=%"PRIu16" already stopped\n",
1747                         port_id);
1748                 return 0;
1749         }
1750
1751         dev->data->dev_started = 0;
1752         ret = (*dev->dev_ops->dev_stop)(dev);
1753         rte_ethdev_trace_stop(port_id, ret);
1754
1755         return ret;
1756 }
1757
1758 int
1759 rte_eth_dev_set_link_up(uint16_t port_id)
1760 {
1761         struct rte_eth_dev *dev;
1762
1763         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1764
1765         dev = &rte_eth_devices[port_id];
1766
1767         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1768         return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
1769 }
1770
1771 int
1772 rte_eth_dev_set_link_down(uint16_t port_id)
1773 {
1774         struct rte_eth_dev *dev;
1775
1776         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1777
1778         dev = &rte_eth_devices[port_id];
1779
1780         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1781         return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
1782 }
1783
1784 int
1785 rte_eth_dev_close(uint16_t port_id)
1786 {
1787         struct rte_eth_dev *dev;
1788         int firsterr, binerr;
1789         int *lasterr = &firsterr;
1790
1791         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1792         dev = &rte_eth_devices[port_id];
1793
1794         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
1795         *lasterr = (*dev->dev_ops->dev_close)(dev);
1796         if (*lasterr != 0)
1797                 lasterr = &binerr;
1798         /* keep the first error code; any later error is parked in binerr */
1799         rte_ethdev_trace_close(port_id);
1800         *lasterr = rte_eth_dev_release_port(dev);
1801
1802         return eth_err(port_id, firsterr);
1803 }
1804
1805 int
1806 rte_eth_dev_reset(uint16_t port_id)
1807 {
1808         struct rte_eth_dev *dev;
1809         int ret;
1810
1811         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1812         dev = &rte_eth_devices[port_id];
1813
1814         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
1815
1816         ret = rte_eth_dev_stop(port_id);
1817         if (ret != 0) {
1818                 RTE_ETHDEV_LOG(ERR,
1819                         "Failed to stop device (port %u) before reset: %s, ignoring\n",
1820                         port_id, rte_strerror(-ret));
1821         }
1822         ret = dev->dev_ops->dev_reset(dev);
1823
1824         return eth_err(port_id, ret);
1825 }
1826
1827 int
1828 rte_eth_dev_is_removed(uint16_t port_id)
1829 {
1830         struct rte_eth_dev *dev;
1831         int ret;
1832
1833         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
1834
1835         dev = &rte_eth_devices[port_id];
1836
1837         if (dev->state == RTE_ETH_DEV_REMOVED)
1838                 return 1;
1839
1840         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);
1841
1842         ret = dev->dev_ops->is_removed(dev);
1843         if (ret != 0)
1844                 /* Device is physically removed. */
1845                 dev->state = RTE_ETH_DEV_REMOVED;
1846
1847         return ret;
1848 }
1849
1850 static int
1851 rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg,
1852                              uint16_t n_seg, uint32_t *mbp_buf_size,
1853                              const struct rte_eth_dev_info *dev_info)
1854 {
1855         const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa;
1856         struct rte_mempool *mp_first;
1857         uint32_t offset_mask;
1858         uint16_t seg_idx;
1859
1860         if (n_seg > seg_capa->max_nseg) {
1861                 RTE_ETHDEV_LOG(ERR,
1862                                "Requested Rx segments %u exceed supported %u\n",
1863                                n_seg, seg_capa->max_nseg);
1864                 return -EINVAL;
1865         }
1866         /*
1867          * Check the sizes and offsets against buffer sizes
1868          * for each segment specified in extended configuration.
1869          */
1870         mp_first = rx_seg[0].mp;
1871         offset_mask = (1u << seg_capa->offset_align_log2) - 1;
1872         for (seg_idx = 0; seg_idx < n_seg; seg_idx++) {
1873                 struct rte_mempool *mpl = rx_seg[seg_idx].mp;
1874                 uint32_t length = rx_seg[seg_idx].length;
1875                 uint32_t offset = rx_seg[seg_idx].offset;
1876
1877                 if (mpl == NULL) {
1878                         RTE_ETHDEV_LOG(ERR, "null mempool pointer\n");
1879                         return -EINVAL;
1880                 }
1881                 if (seg_idx != 0 && mp_first != mpl &&
1882                     seg_capa->multi_pools == 0) {
1883                         RTE_ETHDEV_LOG(ERR, "Receiving to multiple pools is not supported\n");
1884                         return -ENOTSUP;
1885                 }
1886                 if (offset != 0) {
1887                         if (seg_capa->offset_allowed == 0) {
1888                                 RTE_ETHDEV_LOG(ERR, "Rx segmentation with offset is not supported\n");
1889                                 return -ENOTSUP;
1890                         }
1891                         if (offset & offset_mask) {
1892                                 RTE_ETHDEV_LOG(ERR, "Rx segmentation invalid offset alignment %u, %u\n",
1893                                                offset,
1894                                                seg_capa->offset_align_log2);
1895                                 return -EINVAL;
1896                         }
1897                 }
1898                 if (mpl->private_data_size <
1899                         sizeof(struct rte_pktmbuf_pool_private)) {
1900                         RTE_ETHDEV_LOG(ERR,
1901                                        "%s private_data_size %u < %u\n",
1902                                        mpl->name, mpl->private_data_size,
1903                                        (unsigned int)sizeof
1904                                         (struct rte_pktmbuf_pool_private));
1905                         return -ENOSPC;
1906                 }
1907                 offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM;
1908                 *mbp_buf_size = rte_pktmbuf_data_room_size(mpl);
1909                 length = length != 0 ? length : *mbp_buf_size;
1910                 if (*mbp_buf_size < length + offset) {
1911                         RTE_ETHDEV_LOG(ERR,
1912                                        "%s mbuf_data_room_size %u < %u (segment length=%u + segment offset=%u)\n",
1913                                        mpl->name, *mbp_buf_size,
1914                                        length + offset, length, offset);
1915                         return -EINVAL;
1916                 }
1917         }
1918         return 0;
1919 }
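
/*
 * Illustrative sketch (editor's addition): a two-segment buffer-split
 * description that would pass the checks above. "hdr_pool" and "pay_pool"
 * are hypothetical application-created mempools.
 *
 *	union rte_eth_rxseg seg[2];
 *	struct rte_eth_rxconf rxconf = dev_info.default_rxconf;
 *
 *	memset(seg, 0, sizeof(seg));
 *	seg[0].split.mp = hdr_pool;
 *	seg[0].split.length = 64;	(first 64 bytes of each packet)
 *	seg[1].split.mp = pay_pool;
 *	seg[1].split.length = 0;	(remainder, up to the pool data room)
 *	rxconf.rx_seg = seg;
 *	rxconf.rx_nseg = 2;
 *	rxconf.offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
 *	rte_eth_rx_queue_setup(port_id, 0, 512, SOCKET_ID_ANY, &rxconf, NULL);
 */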
1920
1921 int
1922 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1923                        uint16_t nb_rx_desc, unsigned int socket_id,
1924                        const struct rte_eth_rxconf *rx_conf,
1925                        struct rte_mempool *mp)
1926 {
1927         int ret;
1928         uint32_t mbp_buf_size;
1929         struct rte_eth_dev *dev;
1930         struct rte_eth_dev_info dev_info;
1931         struct rte_eth_rxconf local_conf;
1932         void **rxq;
1933
1934         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1935
1936         dev = &rte_eth_devices[port_id];
1937         if (rx_queue_id >= dev->data->nb_rx_queues) {
1938                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
1939                 return -EINVAL;
1940         }
1941
1942         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
1943
1944         ret = rte_eth_dev_info_get(port_id, &dev_info);
1945         if (ret != 0)
1946                 return ret;
1947
1948         if (mp != NULL) {
1949                 /* Single pool configuration check. */
1950                 if (rx_conf != NULL && rx_conf->rx_nseg != 0) {
1951                         RTE_ETHDEV_LOG(ERR,
1952                                        "Ambiguous segment configuration\n");
1953                         return -EINVAL;
1954                 }
1955                 /*
1956                  * Check the size of the mbuf data buffer, this value
1957                  * must be provided in the private data of the memory pool.
1958                  * First check that the memory pool(s) has a valid private data.
1959                  */
1960                 if (mp->private_data_size <
1961                                 sizeof(struct rte_pktmbuf_pool_private)) {
1962                         RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n",
1963                                 mp->name, mp->private_data_size,
1964                                 (unsigned int)
1965                                 sizeof(struct rte_pktmbuf_pool_private));
1966                         return -ENOSPC;
1967                 }
1968                 mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1969                 if (mbp_buf_size < dev_info.min_rx_bufsize +
1970                                    RTE_PKTMBUF_HEADROOM) {
1971                         RTE_ETHDEV_LOG(ERR,
1972                                        "%s mbuf_data_room_size %u < %u (RTE_PKTMBUF_HEADROOM=%u + min_rx_bufsize(dev)=%u)\n",
1973                                        mp->name, mbp_buf_size,
1974                                        RTE_PKTMBUF_HEADROOM +
1975                                        dev_info.min_rx_bufsize,
1976                                        RTE_PKTMBUF_HEADROOM,
1977                                        dev_info.min_rx_bufsize);
1978                         return -EINVAL;
1979                 }
1980         } else {
1981                 const struct rte_eth_rxseg_split *rx_seg;
1982                 uint16_t n_seg;
1983                 /* Extended multi-segment configuration check. */
1984                 if (rx_conf == NULL || rx_conf->rx_seg == NULL || rx_conf->rx_nseg == 0) {
1985                         RTE_ETHDEV_LOG(ERR,
1986                                        "Memory pool is null and no extended configuration provided\n");
1987                         return -EINVAL;
1988                 }
1989                 rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg;
1990                 n_seg = rx_conf->rx_nseg;
1991                 if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
1992                         ret = rte_eth_rx_queue_check_split(rx_seg, n_seg,
1993                                                            &mbp_buf_size,
1994                                                            &dev_info);
1995                         if (ret != 0)
1996                                 return ret;
1997                 } else {
1998                         RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n");
1999                         return -EINVAL;
2000                 }
2001         }
2002
2003         /* Use default specified by driver, if nb_rx_desc is zero */
2004         if (nb_rx_desc == 0) {
2005                 nb_rx_desc = dev_info.default_rxportconf.ring_size;
2006                 /* If driver default is also zero, fall back on EAL default */
2007                 if (nb_rx_desc == 0)
2008                         nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
2009         }
2010
2011         if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
2012                         nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
2013                         nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
2014
2015                 RTE_ETHDEV_LOG(ERR,
2016                         "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
2017                         nb_rx_desc, dev_info.rx_desc_lim.nb_max,
2018                         dev_info.rx_desc_lim.nb_min,
2019                         dev_info.rx_desc_lim.nb_align);
2020                 return -EINVAL;
2021         }
2022
2023         if (dev->data->dev_started &&
2024                 !(dev_info.dev_capa &
2025                         RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
2026                 return -EBUSY;
2027
2028         if (dev->data->dev_started &&
2029                 (dev->data->rx_queue_state[rx_queue_id] !=
2030                         RTE_ETH_QUEUE_STATE_STOPPED))
2031                 return -EBUSY;
2032
2033         rxq = dev->data->rx_queues;
2034         if (rxq[rx_queue_id]) {
2035                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
2036                                         -ENOTSUP);
2037                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
2038                 rxq[rx_queue_id] = NULL;
2039         }
2040
2041         if (rx_conf == NULL)
2042                 rx_conf = &dev_info.default_rxconf;
2043
2044         local_conf = *rx_conf;
2045
2046         /*
2047          * If an offload has already been enabled in
2048          * rte_eth_dev_configure(), it has been enabled on all queues,
2049          * so there is no need to enable it on this queue again.
2050          * The local_conf.offloads input to the underlying PMD only carries
2051          * those offloads which are enabled on this queue alone and
2052          * not on all queues.
2053          */
2054         local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
2055
2056         /*
2057          * Offloads newly added for this queue are those not enabled in
2058          * rte_eth_dev_configure(), and they must be of the per-queue type.
2059          * A pure per-port offload can't be enabled on one queue while
2060          * disabled on another, and it can't be newly requested for a
2061          * queue here if it wasn't already enabled in
2062          * rte_eth_dev_configure().
2063          */
2064         if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
2065              local_conf.offloads) {
2066                 RTE_ETHDEV_LOG(ERR,
2067                         "Ethdev port_id=%d rx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
2068                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2069                         port_id, rx_queue_id, local_conf.offloads,
2070                         dev_info.rx_queue_offload_capa,
2071                         __func__);
2072                 return -EINVAL;
2073         }
2074
2075         /*
2076          * If LRO is enabled, check that the maximum aggregated packet
2077          * size is supported by the configured device.
2078          */
2079         if (local_conf.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
2080                 if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0)
2081                         dev->data->dev_conf.rxmode.max_lro_pkt_size =
2082                                 dev->data->dev_conf.rxmode.max_rx_pkt_len;
2083                 int ret = check_lro_pkt_size(port_id,
2084                                 dev->data->dev_conf.rxmode.max_lro_pkt_size,
2085                                 dev->data->dev_conf.rxmode.max_rx_pkt_len,
2086                                 dev_info.max_lro_pkt_size);
2087                 if (ret != 0)
2088                         return ret;
2089         }
2090
2091         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
2092                                               socket_id, &local_conf, mp);
2093         if (!ret) {
2094                 if (!dev->data->min_rx_buf_size ||
2095                     dev->data->min_rx_buf_size > mbp_buf_size)
2096                         dev->data->min_rx_buf_size = mbp_buf_size;
2097         }
2098
2099         rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp,
2100                 rx_conf, ret);
2101         return eth_err(port_id, ret);
2102 }
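
/*
 * Illustrative sketch (editor's addition): the common single-mempool Rx
 * queue setup. The pool's data room must cover RTE_PKTMBUF_HEADROOM plus
 * the device's min_rx_bufsize, as checked above; a NULL rx_conf selects
 * dev_info.default_rxconf.
 *
 *	struct rte_mempool *mb_pool;
 *
 *	mb_pool = rte_pktmbuf_pool_create("rx_pool", 8192, 256, 0,
 *					  RTE_MBUF_DEFAULT_BUF_SIZE,
 *					  rte_socket_id());
 *	if (mb_pool == NULL ||
 *	    rte_eth_rx_queue_setup(port_id, 0, 1024, rte_socket_id(),
 *				   NULL, mb_pool) < 0)
 *		rte_exit(EXIT_FAILURE, "Rx queue setup failed\n");
 */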
2103
2104 int
2105 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2106                                uint16_t nb_rx_desc,
2107                                const struct rte_eth_hairpin_conf *conf)
2108 {
2109         int ret;
2110         struct rte_eth_dev *dev;
2111         struct rte_eth_hairpin_cap cap;
2112         void **rxq;
2113         int i;
2114         int count;
2115
2116         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2117
2118         dev = &rte_eth_devices[port_id];
2119         if (rx_queue_id >= dev->data->nb_rx_queues) {
2120                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
2121                 return -EINVAL;
2122         }
2123         ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2124         if (ret != 0)
2125                 return ret;
2126         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_hairpin_queue_setup,
2127                                 -ENOTSUP);
2128         /* if nb_rx_desc is zero use max number of desc from the driver. */
2129         if (nb_rx_desc == 0)
2130                 nb_rx_desc = cap.max_nb_desc;
2131         if (nb_rx_desc > cap.max_nb_desc) {
2132                 RTE_ETHDEV_LOG(ERR,
2133                         "Invalid value for nb_rx_desc(=%hu), should be: <= %hu\n",
2134                         nb_rx_desc, cap.max_nb_desc);
2135                 return -EINVAL;
2136         }
2137         if (conf->peer_count > cap.max_rx_2_tx) {
2138                 RTE_ETHDEV_LOG(ERR,
2139                         "Invalid value for number of peers for Rx queue(=%u), should be: <= %hu\n",
2140                         conf->peer_count, cap.max_rx_2_tx);
2141                 return -EINVAL;
2142         }
2143         if (conf->peer_count == 0) {
2144                 RTE_ETHDEV_LOG(ERR,
2145                         "Invalid value for number of peers for Rx queue(=%u), should be: > 0\n",
2146                         conf->peer_count);
2147                 return -EINVAL;
2148         }
2149         for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
2150              cap.max_nb_queues != UINT16_MAX; i++) {
2151                 if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
2152                         count++;
2153         }
2154         if (count > cap.max_nb_queues) {
2155                 RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues, max is %d\n",
2156                                cap.max_nb_queues);
2157                 return -EINVAL;
2158         }
2159         if (dev->data->dev_started)
2160                 return -EBUSY;
2161         rxq = dev->data->rx_queues;
2162         if (rxq[rx_queue_id] != NULL) {
2163                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
2164                                         -ENOTSUP);
2165                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
2166                 rxq[rx_queue_id] = NULL;
2167         }
2168         ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
2169                                                       nb_rx_desc, conf);
2170         if (ret == 0)
2171                 dev->data->rx_queue_state[rx_queue_id] =
2172                         RTE_ETH_QUEUE_STATE_HAIRPIN;
2173         return eth_err(port_id, ret);
2174 }
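
/*
 * Illustrative sketch (editor's addition): connecting Rx hairpin queue 1
 * of this port to Tx queue 1 of a hypothetical "peer_port". peer_count
 * must be non-zero and within cap.max_rx_2_tx, as validated above;
 * nb_rx_desc == 0 selects cap.max_nb_desc.
 *
 *	struct rte_eth_hairpin_conf hp_conf;
 *
 *	memset(&hp_conf, 0, sizeof(hp_conf));
 *	hp_conf.peer_count = 1;
 *	hp_conf.peers[0].port = peer_port;
 *	hp_conf.peers[0].queue = 1;
 *	if (rte_eth_rx_hairpin_queue_setup(port_id, 1, 0, &hp_conf) < 0)
 *		rte_exit(EXIT_FAILURE, "hairpin Rx queue setup failed\n");
 */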
2175
2176 int
2177 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2178                        uint16_t nb_tx_desc, unsigned int socket_id,
2179                        const struct rte_eth_txconf *tx_conf)
2180 {
2181         struct rte_eth_dev *dev;
2182         struct rte_eth_dev_info dev_info;
2183         struct rte_eth_txconf local_conf;
2184         void **txq;
2185         int ret;
2186
2187         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2188
2189         dev = &rte_eth_devices[port_id];
2190         if (tx_queue_id >= dev->data->nb_tx_queues) {
2191                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
2192                 return -EINVAL;
2193         }
2194
2195         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
2196
2197         ret = rte_eth_dev_info_get(port_id, &dev_info);
2198         if (ret != 0)
2199                 return ret;
2200
2201         /* Use default specified by driver, if nb_tx_desc is zero */
2202         if (nb_tx_desc == 0) {
2203                 nb_tx_desc = dev_info.default_txportconf.ring_size;
2204                 /* If driver default is zero, fall back on EAL default */
2205                 if (nb_tx_desc == 0)
2206                         nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
2207         }
2208         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
2209             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
2210             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
2211                 RTE_ETHDEV_LOG(ERR,
2212                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
2213                         nb_tx_desc, dev_info.tx_desc_lim.nb_max,
2214                         dev_info.tx_desc_lim.nb_min,
2215                         dev_info.tx_desc_lim.nb_align);
2216                 return -EINVAL;
2217         }
2218
2219         if (dev->data->dev_started &&
2220                 !(dev_info.dev_capa &
2221                         RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
2222                 return -EBUSY;
2223
2224         if (dev->data->dev_started &&
2225                 (dev->data->tx_queue_state[tx_queue_id] !=
2226                         RTE_ETH_QUEUE_STATE_STOPPED))
2227                 return -EBUSY;
2228
2229         txq = dev->data->tx_queues;
2230         if (txq[tx_queue_id]) {
2231                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
2232                                         -ENOTSUP);
2233                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
2234                 txq[tx_queue_id] = NULL;
2235         }
2236
2237         if (tx_conf == NULL)
2238                 tx_conf = &dev_info.default_txconf;
2239
2240         local_conf = *tx_conf;
2241
2242         /*
2243          * If an offload has already been enabled in
2244          * rte_eth_dev_configure(), it has been enabled on all queues,
2245          * so there is no need to enable it on this queue again.
2246          * The local_conf.offloads input to the underlying PMD only carries
2247          * those offloads which are enabled on this queue alone and
2248          * not on all queues.
2249          */
2250         local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
2251
2252         /*
2253          * Offloads newly added for this queue are those not enabled in
2254          * rte_eth_dev_configure(), and they must be of the per-queue type.
2255          * A pure per-port offload can't be enabled on one queue while
2256          * disabled on another, and it can't be newly requested for a
2257          * queue here if it wasn't already enabled in
2258          * rte_eth_dev_configure().
2259          */
2260         if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
2261              local_conf.offloads) {
2262                 RTE_ETHDEV_LOG(ERR,
2263                         "Ethdev port_id=%d tx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
2264                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2265                         port_id, tx_queue_id, local_conf.offloads,
2266                         dev_info.tx_queue_offload_capa,
2267                         __func__);
2268                 return -EINVAL;
2269         }
2270
2271         rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf);
2272         return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
2273                        tx_queue_id, nb_tx_desc, socket_id, &local_conf));
2274 }
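
/*
 * Illustrative sketch (editor's addition): enabling an offload on a single
 * Tx queue. Port-level offloads from rte_eth_dev_configure() are masked
 * out above, so only this queue-local delta reaches the PMD, and it must
 * be within tx_queue_offload_capa (assuming the PMD reports
 * DEV_TX_OFFLOAD_MBUF_FAST_FREE there).
 *
 *	struct rte_eth_txconf txconf = dev_info.default_txconf;
 *
 *	txconf.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
 *	if (rte_eth_tx_queue_setup(port_id, 0, 1024, rte_socket_id(),
 *				   &txconf) < 0)
 *		rte_exit(EXIT_FAILURE, "Tx queue setup failed\n");
 */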
2275
2276 int
2277 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2278                                uint16_t nb_tx_desc,
2279                                const struct rte_eth_hairpin_conf *conf)
2280 {
2281         struct rte_eth_dev *dev;
2282         struct rte_eth_hairpin_cap cap;
2283         void **txq;
2284         int i;
2285         int count;
2286         int ret;
2287
2288         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2289         dev = &rte_eth_devices[port_id];
2290         if (tx_queue_id >= dev->data->nb_tx_queues) {
2291                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
2292                 return -EINVAL;
2293         }
2294         ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2295         if (ret != 0)
2296                 return ret;
2297         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup,
2298                                 -ENOTSUP);
2299         /* if nb_tx_desc is zero use max number of desc from the driver. */
2300         if (nb_tx_desc == 0)
2301                 nb_tx_desc = cap.max_nb_desc;
2302         if (nb_tx_desc > cap.max_nb_desc) {
2303                 RTE_ETHDEV_LOG(ERR,
2304                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu\n",
2305                         nb_tx_desc, cap.max_nb_desc);
2306                 return -EINVAL;
2307         }
2308         if (conf->peer_count > cap.max_tx_2_rx) {
2309                 RTE_ETHDEV_LOG(ERR,
2310                         "Invalid value for number of peers for Tx queue(=%u), should be: <= %hu\n",
2311                         conf->peer_count, cap.max_tx_2_rx);
2312                 return -EINVAL;
2313         }
2314         if (conf->peer_count == 0) {
2315                 RTE_ETHDEV_LOG(ERR,
2316                         "Invalid value for number of peers for Tx queue(=%u), should be: > 0\n",
2317                         conf->peer_count);
2318                 return -EINVAL;
2319         }
2320         for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
2321              cap.max_nb_queues != UINT16_MAX; i++) {
2322                 if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
2323                         count++;
2324         }
2325         if (count > cap.max_nb_queues) {
2326                 RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues, max is %d\n",
2327                                cap.max_nb_queues);
2328                 return -EINVAL;
2329         }
2330         if (dev->data->dev_started)
2331                 return -EBUSY;
2332         txq = dev->data->tx_queues;
2333         if (txq[tx_queue_id] != NULL) {
2334                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
2335                                         -ENOTSUP);
2336                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
2337                 txq[tx_queue_id] = NULL;
2338         }
2339         ret = (*dev->dev_ops->tx_hairpin_queue_setup)
2340                 (dev, tx_queue_id, nb_tx_desc, conf);
2341         if (ret == 0)
2342                 dev->data->tx_queue_state[tx_queue_id] =
2343                         RTE_ETH_QUEUE_STATE_HAIRPIN;
2344         return eth_err(port_id, ret);
2345 }
2346
2347 int
2348 rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
2349 {
2350         struct rte_eth_dev *dev;
2351         int ret;
2352
2353         RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2354         dev = &rte_eth_devices[tx_port];
2355         if (dev->data->dev_started == 0) {
2356                 RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port);
2357                 return -EBUSY;
2358         }
2359
2360         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_bind, -ENOTSUP);
2361         ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port);
2362         if (ret != 0)
2363                 RTE_ETHDEV_LOG(ERR, "Failed to bind hairpin Tx port %d"
2364                                " to Rx port %d (%d means all ports)\n",
2365                                tx_port, rx_port, RTE_MAX_ETHPORTS);
2366
2367         return ret;
2368 }
2369
2370 int
2371 rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
2372 {
2373         struct rte_eth_dev *dev;
2374         int ret;
2375
2376         RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2377         dev = &rte_eth_devices[tx_port];
2378         if (dev->data->dev_started == 0) {
2379                 RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port);
2380                 return -EBUSY;
2381         }
2382
2383         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_unbind, -ENOTSUP);
2384         ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port);
2385         if (ret != 0)
2386                 RTE_ETHDEV_LOG(ERR, "Failed to unbind hairpin Tx port %d"
2387                                " from Rx port %d (%d means all ports)\n",
2388                                tx_port, rx_port, RTE_MAX_ETHPORTS);
2389
2390         return ret;
2391 }
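
/*
 * Illustrative sketch (editor's addition): the manual bind flow for
 * two-port hairpin, assuming both hairpin queues were set up with
 * manual_bind = 1 and both ports are started. Per the messages above,
 * passing RTE_MAX_ETHPORTS as the peer means "all peer ports".
 *
 *	if (rte_eth_hairpin_bind(tx_port, rx_port) != 0)
 *		printf("hairpin bind %u -> %u failed\n", tx_port, rx_port);
 *	(forward traffic, then tear down:)
 *	rte_eth_hairpin_unbind(tx_port, rx_port);
 */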
2392
2393 int
2394 rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
2395                                size_t len, uint32_t direction)
2396 {
2397         struct rte_eth_dev *dev;
2398         int ret;
2399
2400         if (peer_ports == NULL || len == 0)
2401                 return -EINVAL;
2402
2403         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2404         dev = &rte_eth_devices[port_id];
2405         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_get_peer_ports,
2406                                 -ENOTSUP);
2407
2408         ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports,
2409                                                       len, direction);
2410         if (ret < 0)
2411                 RTE_ETHDEV_LOG(ERR, "Failed to get hairpin peer %s ports of port %u\n",
2412                                direction ? "Rx" : "Tx", port_id);
2413
2414         return ret;
2415 }
2416
2417 void
2418 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
2419                 void *userdata __rte_unused)
2420 {
2421         rte_pktmbuf_free_bulk(pkts, unsent);
2422 }
2423
2424 void
2425 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
2426                 void *userdata)
2427 {
2428         uint64_t *count = userdata;
2429
2430         rte_pktmbuf_free_bulk(pkts, unsent);
2431         *count += unsent;
2432 }
2433
2434 int
2435 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
2436                 buffer_tx_error_fn cbfn, void *userdata)
2437 {
2438         buffer->error_callback = cbfn;
2439         buffer->error_userdata = userdata;
2440         return 0;
2441 }
2442
2443 int
2444 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
2445 {
2446         int ret = 0;
2447
2448         if (buffer == NULL)
2449                 return -EINVAL;
2450
2451         buffer->size = size;
2452         if (buffer->error_callback == NULL) {
2453                 ret = rte_eth_tx_buffer_set_err_callback(
2454                         buffer, rte_eth_tx_buffer_drop_callback, NULL);
2455         }
2456
2457         return ret;
2458 }
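
/*
 * Illustrative sketch (editor's addition): buffered Tx with drop counting.
 * "m" stands for an mbuf obtained elsewhere; RTE_ETH_TX_BUFFER_SIZE()
 * accounts for the trailing array of mbuf pointers.
 *
 *	static uint64_t drops;
 *	struct rte_eth_dev_tx_buffer *buf;
 *
 *	buf = rte_zmalloc("txbuf", RTE_ETH_TX_BUFFER_SIZE(32), 0);
 *	rte_eth_tx_buffer_init(buf, 32);
 *	rte_eth_tx_buffer_set_err_callback(buf,
 *			rte_eth_tx_buffer_count_callback, &drops);
 *	rte_eth_tx_buffer(port_id, 0, buf, m);		(queues until full)
 *	rte_eth_tx_buffer_flush(port_id, 0, buf);	(sends the remainder)
 */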
2459
2460 int
2461 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
2462 {
2463         struct rte_eth_dev *dev;
2464         int ret;
2465         /* Validate input; bail out if the port is invalid or unsupported. */
2466         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2467         dev = &rte_eth_devices[port_id];
2468         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
2469
2470         /* Call driver to free pending mbufs. */
2471         ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
2472                                                free_cnt);
2473         return eth_err(port_id, ret);
2474 }
2475
2476 int
2477 rte_eth_promiscuous_enable(uint16_t port_id)
2478 {
2479         struct rte_eth_dev *dev;
2480         int diag = 0;
2481
2482         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2483         dev = &rte_eth_devices[port_id];
2484
2485         if (dev->data->promiscuous == 1)
2486                 return 0;
2487
2488         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP);
2489
2490         diag = (*dev->dev_ops->promiscuous_enable)(dev);
2491         dev->data->promiscuous = (diag == 0) ? 1 : 0;
2492
2493         return eth_err(port_id, diag);
2494 }
2495
2496 int
2497 rte_eth_promiscuous_disable(uint16_t port_id)
2498 {
2499         struct rte_eth_dev *dev;
2500         int diag = 0;
2501
2502         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2503         dev = &rte_eth_devices[port_id];
2504
2505         if (dev->data->promiscuous == 0)
2506                 return 0;
2507
2508         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP);
2509
2510         dev->data->promiscuous = 0;
2511         diag = (*dev->dev_ops->promiscuous_disable)(dev);
2512         if (diag != 0)
2513                 dev->data->promiscuous = 1;
2514
2515         return eth_err(port_id, diag);
2516 }
2517
2518 int
2519 rte_eth_promiscuous_get(uint16_t port_id)
2520 {
2521         struct rte_eth_dev *dev;
2522
2523         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2524
2525         dev = &rte_eth_devices[port_id];
2526         return dev->data->promiscuous;
2527 }
2528
2529 int
2530 rte_eth_allmulticast_enable(uint16_t port_id)
2531 {
2532         struct rte_eth_dev *dev;
2533         int diag;
2534
2535         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2536         dev = &rte_eth_devices[port_id];
2537
2538         if (dev->data->all_multicast == 1)
2539                 return 0;
2540
2541         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP);
2542         diag = (*dev->dev_ops->allmulticast_enable)(dev);
2543         dev->data->all_multicast = (diag == 0) ? 1 : 0;
2544
2545         return eth_err(port_id, diag);
2546 }
2547
2548 int
2549 rte_eth_allmulticast_disable(uint16_t port_id)
2550 {
2551         struct rte_eth_dev *dev;
2552         int diag;
2553
2554         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2555         dev = &rte_eth_devices[port_id];
2556
2557         if (dev->data->all_multicast == 0)
2558                 return 0;
2559
2560         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP);
2561         dev->data->all_multicast = 0;
2562         diag = (*dev->dev_ops->allmulticast_disable)(dev);
2563         if (diag != 0)
2564                 dev->data->all_multicast = 1;
2565
2566         return eth_err(port_id, diag);
2567 }
2568
2569 int
2570 rte_eth_allmulticast_get(uint16_t port_id)
2571 {
2572         struct rte_eth_dev *dev;
2573
2574         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2575
2576         dev = &rte_eth_devices[port_id];
2577         return dev->data->all_multicast;
2578 }
2579
2580 int
2581 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
2582 {
2583         struct rte_eth_dev *dev;
2584
2585         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2586         dev = &rte_eth_devices[port_id];
2587
2588         if (dev->data->dev_conf.intr_conf.lsc &&
2589             dev->data->dev_started)
2590                 rte_eth_linkstatus_get(dev, eth_link);
2591         else {
2592                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2593                 (*dev->dev_ops->link_update)(dev, 1);
2594                 *eth_link = dev->data->dev_link;
2595         }
2596
2597         return 0;
2598 }
2599
2600 int
2601 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
2602 {
2603         struct rte_eth_dev *dev;
2604
2605         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2606         dev = &rte_eth_devices[port_id];
2607
2608         if (dev->data->dev_conf.intr_conf.lsc &&
2609             dev->data->dev_started)
2610                 rte_eth_linkstatus_get(dev, eth_link);
2611         else {
2612                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2613                 (*dev->dev_ops->link_update)(dev, 0);
2614                 *eth_link = dev->data->dev_link;
2615         }
2616
2617         return 0;
2618 }
2619
2620 const char *
2621 rte_eth_link_speed_to_str(uint32_t link_speed)
2622 {
2623         switch (link_speed) {
2624         case ETH_SPEED_NUM_NONE: return "None";
2625         case ETH_SPEED_NUM_10M:  return "10 Mbps";
2626         case ETH_SPEED_NUM_100M: return "100 Mbps";
2627         case ETH_SPEED_NUM_1G:   return "1 Gbps";
2628         case ETH_SPEED_NUM_2_5G: return "2.5 Gbps";
2629         case ETH_SPEED_NUM_5G:   return "5 Gbps";
2630         case ETH_SPEED_NUM_10G:  return "10 Gbps";
2631         case ETH_SPEED_NUM_20G:  return "20 Gbps";
2632         case ETH_SPEED_NUM_25G:  return "25 Gbps";
2633         case ETH_SPEED_NUM_40G:  return "40 Gbps";
2634         case ETH_SPEED_NUM_50G:  return "50 Gbps";
2635         case ETH_SPEED_NUM_56G:  return "56 Gbps";
2636         case ETH_SPEED_NUM_100G: return "100 Gbps";
2637         case ETH_SPEED_NUM_200G: return "200 Gbps";
2638         case ETH_SPEED_NUM_UNKNOWN: return "Unknown";
2639         default: return "Invalid";
2640         }
2641 }
2642
2643 int
2644 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
2645 {
2646         if (eth_link->link_status == ETH_LINK_DOWN)
2647                 return snprintf(str, len, "Link down");
2648         else
2649                 return snprintf(str, len, "Link up at %s %s %s",
2650                         rte_eth_link_speed_to_str(eth_link->link_speed),
2651                         (eth_link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
2652                         "FDX" : "HDX",
2653                         (eth_link->link_autoneg == ETH_LINK_AUTONEG) ?
2654                         "Autoneg" : "Fixed");
2655 }
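
/*
 * Illustrative sketch (editor's addition): querying and printing the link
 * with the non-blocking variant.
 *
 *	struct rte_eth_link link;
 *	char buf[RTE_ETH_LINK_MAX_STR_LEN];
 *
 *	if (rte_eth_link_get_nowait(port_id, &link) == 0) {
 *		rte_eth_link_to_str(buf, sizeof(buf), &link);
 *		printf("port %u: %s\n", port_id, buf);
 *	}
 */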
2656
2657 int
2658 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
2659 {
2660         struct rte_eth_dev *dev;
2661
2662         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2663
2664         dev = &rte_eth_devices[port_id];
2665         memset(stats, 0, sizeof(*stats));
2666
2667         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
2668         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
2669         return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
2670 }
2671
2672 int
2673 rte_eth_stats_reset(uint16_t port_id)
2674 {
2675         struct rte_eth_dev *dev;
2676         int ret;
2677
2678         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2679         dev = &rte_eth_devices[port_id];
2680
2681         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
2682         ret = (*dev->dev_ops->stats_reset)(dev);
2683         if (ret != 0)
2684                 return eth_err(port_id, ret);
2685
2686         dev->data->rx_mbuf_alloc_failed = 0;
2687
2688         return 0;
2689 }
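
/*
 * Illustrative sketch (editor's addition): sampling and clearing the basic
 * stats. rte_eth_stats_get() zeroes the structure before the driver fills
 * it, and rx_nombuf is taken from ethdev data even if the PMD does not
 * report it.
 *
 *	struct rte_eth_stats st;
 *
 *	if (rte_eth_stats_get(port_id, &st) == 0)
 *		printf("rx=%"PRIu64" missed=%"PRIu64"\n",
 *		       st.ipackets, st.imissed);
 *	rte_eth_stats_reset(port_id);
 */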
2690
2691 static inline int
2692 get_xstats_basic_count(struct rte_eth_dev *dev)
2693 {
2694         uint16_t nb_rxqs, nb_txqs;
2695         int count;
2696
2697         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2698         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2699
2700         count = RTE_NB_STATS;
2701         if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) {
2702                 count += nb_rxqs * RTE_NB_RXQ_STATS;
2703                 count += nb_txqs * RTE_NB_TXQ_STATS;
2704         }
2705
2706         return count;
2707 }
2708
2709 static int
2710 get_xstats_count(uint16_t port_id)
2711 {
2712         struct rte_eth_dev *dev;
2713         int count;
2714
2715         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2716         dev = &rte_eth_devices[port_id];
2717         if (dev->dev_ops->xstats_get_names_by_id != NULL) {
2718                 count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
2719                                 NULL, 0);
2720                 if (count < 0)
2721                         return eth_err(port_id, count);
2722         }
2723         if (dev->dev_ops->xstats_get_names != NULL) {
2724                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
2725                 if (count < 0)
2726                         return eth_err(port_id, count);
2727         } else
2728                 count = 0;
2729
2731         count += get_xstats_basic_count(dev);
2732
2733         return count;
2734 }
2735
2736 int
2737 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
2738                 uint64_t *id)
2739 {
2740         int cnt_xstats, idx_xstat;
2741
2742         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2743
2744         if (id == NULL) {
2745                 RTE_ETHDEV_LOG(ERR, "Id pointer is NULL\n");
2746                 return -EINVAL;
2747         }
2748
2749         if (xstat_name == NULL) {
2750                 RTE_ETHDEV_LOG(ERR, "xstat_name pointer is NULL\n");
2751                 return -EINVAL;
2752         }
2753
2754         /* Get count */
2755         cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
2756         if (cnt_xstats < 0) {
2757                 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n");
2758                 return -ENODEV;
2759         }
2760
2761         /* Get id-name lookup table */
2762         struct rte_eth_xstat_name xstats_names[cnt_xstats];
2763
2764         if (cnt_xstats != rte_eth_xstats_get_names_by_id(
2765                         port_id, xstats_names, cnt_xstats, NULL)) {
2766                 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n");
2767                 return -1;
2768         }
2769
2770         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
2771                 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
2772                         *id = idx_xstat;
2773                         return 0;
2774                 }
2775         }
2776
2777         return -EINVAL;
2778 }
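
/*
 * Illustrative sketch (editor's addition): resolving one xstat by name and
 * reading it by id. "rx_good_packets" is one of the basic names from
 * rte_stats_strings.
 *
 *	uint64_t id, value;
 *
 *	if (rte_eth_xstats_get_id_by_name(port_id, "rx_good_packets",
 *					  &id) == 0 &&
 *	    rte_eth_xstats_get_by_id(port_id, &id, &value, 1) == 1)
 *		printf("rx_good_packets = %"PRIu64"\n", value);
 */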
2779
2780 /* retrieve basic stats names */
2781 static int
2782 rte_eth_basic_stats_get_names(struct rte_eth_dev *dev,
2783         struct rte_eth_xstat_name *xstats_names)
2784 {
2785         int cnt_used_entries = 0;
2786         uint32_t idx, id_queue;
2787         uint16_t num_q;
2788
2789         for (idx = 0; idx < RTE_NB_STATS; idx++) {
2790                 strlcpy(xstats_names[cnt_used_entries].name,
2791                         rte_stats_strings[idx].name,
2792                         sizeof(xstats_names[0].name));
2793                 cnt_used_entries++;
2794         }
2795
2796         if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
2797                 return cnt_used_entries;
2798
2799         num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2800         for (id_queue = 0; id_queue < num_q; id_queue++) {
2801                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
2802                         snprintf(xstats_names[cnt_used_entries].name,
2803                                 sizeof(xstats_names[0].name),
2804                                 "rx_q%u_%s",
2805                                 id_queue, rte_rxq_stats_strings[idx].name);
2806                         cnt_used_entries++;
2807                 }
2808
2809         }
2810         num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2811         for (id_queue = 0; id_queue < num_q; id_queue++) {
2812                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
2813                         snprintf(xstats_names[cnt_used_entries].name,
2814                                 sizeof(xstats_names[0].name),
2815                                 "tx_q%u_%s",
2816                                 id_queue, rte_txq_stats_strings[idx].name);
2817                         cnt_used_entries++;
2818                 }
2819         }
2820         return cnt_used_entries;
2821 }
2822
2823 /* retrieve ethdev extended statistics names */
2824 int
2825 rte_eth_xstats_get_names_by_id(uint16_t port_id,
2826         struct rte_eth_xstat_name *xstats_names, unsigned int size,
2827         uint64_t *ids)
2828 {
2829         struct rte_eth_xstat_name *xstats_names_copy;
2830         unsigned int no_basic_stat_requested = 1;
2831         unsigned int no_ext_stat_requested = 1;
2832         unsigned int expected_entries;
2833         unsigned int basic_count;
2834         struct rte_eth_dev *dev;
2835         unsigned int i;
2836         int ret;
2837
2838         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2839         dev = &rte_eth_devices[port_id];
2840
2841         basic_count = get_xstats_basic_count(dev);
2842         ret = get_xstats_count(port_id);
2843         if (ret < 0)
2844                 return ret;
2845         expected_entries = (unsigned int)ret;
2846
2847         /* Return max number of stats if no ids given */
2848         if (!ids) {
2849                 if (!xstats_names)
2850                         return expected_entries;
2851                 else if (size < expected_entries)
2852                         return expected_entries;
2853         }
2854
2855         if (ids && !xstats_names)
2856                 return -EINVAL;
2857
2858         if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
2859                 uint64_t ids_copy[size];
2860
2861                 for (i = 0; i < size; i++) {
2862                         if (ids[i] < basic_count) {
2863                                 no_basic_stat_requested = 0;
2864                                 break;
2865                         }
2866
2867                         /*
2868                          * Convert ids to xstats ids that PMD knows.
2869                          * ids known by user are basic + extended stats.
2870                          */
2871                         ids_copy[i] = ids[i] - basic_count;
2872                 }
2873
2874                 if (no_basic_stat_requested)
2875                         return (*dev->dev_ops->xstats_get_names_by_id)(dev,
2876                                         xstats_names, ids_copy, size);
2877         }
2878
2879         /* Retrieve all stats */
2880         if (!ids) {
2881                 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
2882                                 expected_entries);
2883                 if (num_stats < 0 || num_stats > (int)expected_entries)
2884                         return num_stats;
2885                 else
2886                         return expected_entries;
2887         }
2888
2889         xstats_names_copy = calloc(expected_entries,
2890                 sizeof(struct rte_eth_xstat_name));
2891
2892         if (!xstats_names_copy) {
2893                 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n");
2894                 return -ENOMEM;
2895         }
2896
2897         /* ids cannot be NULL here: the !ids case returned above */
2898         for (i = 0; i < size; i++) {
2899                 if (ids[i] >= basic_count) {
2900                         no_ext_stat_requested = 0;
2901                         break;
2902                 }
2903         }
2905
2906         /* Fill xstats_names_copy structure */
2907         if (no_ext_stat_requested) {
2908                 rte_eth_basic_stats_get_names(dev, xstats_names_copy);
2909         } else {
2910                 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
2911                         expected_entries);
2912                 if (ret < 0) {
2913                         free(xstats_names_copy);
2914                         return ret;
2915                 }
2916         }
2917
2918         /* Filter stats */
2919         for (i = 0; i < size; i++) {
2920                 if (ids[i] >= expected_entries) {
2921                         RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
2922                         free(xstats_names_copy);
2923                         return -1;
2924                 }
2925                 xstats_names[i] = xstats_names_copy[ids[i]];
2926         }
2927
2928         free(xstats_names_copy);
2929         return size;
2930 }
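
/*
 * Illustrative use of the by-ID name lookup above (editor's sketch, not
 * part of the original sources; port_id is assumed valid):
 *
 *	uint64_t ids[2] = {0, 1};
 *	struct rte_eth_xstat_name names[2];
 *
 *	if (rte_eth_xstats_get_names_by_id(port_id, names, 2, ids) == 2)
 *		printf("%s %s\n", names[0].name, names[1].name);
 *
 * Calling with ids == NULL and xstats_names == NULL returns the total
 * number of statistics, which callers use to size their arrays.
 */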
2931
2932 int
2933 rte_eth_xstats_get_names(uint16_t port_id,
2934         struct rte_eth_xstat_name *xstats_names,
2935         unsigned int size)
2936 {
2937         struct rte_eth_dev *dev;
2938         int cnt_used_entries;
2939         int cnt_expected_entries;
2940         int cnt_driver_entries;
2941
2942         cnt_expected_entries = get_xstats_count(port_id);
2943         if (xstats_names == NULL || cnt_expected_entries < 0 ||
2944                         (int)size < cnt_expected_entries)
2945                 return cnt_expected_entries;
2946
2947         /* port_id checked in get_xstats_count() */
2948         dev = &rte_eth_devices[port_id];
2949
2950         cnt_used_entries = rte_eth_basic_stats_get_names(
2951                 dev, xstats_names);
2952
2953         if (dev->dev_ops->xstats_get_names != NULL) {
2954                 /* If there are any driver-specific xstats, append them
2955                  * to the end of the list.
2956                  */
2957                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
2958                         dev,
2959                         xstats_names + cnt_used_entries,
2960                         size - cnt_used_entries);
2961                 if (cnt_driver_entries < 0)
2962                         return eth_err(port_id, cnt_driver_entries);
2963                 cnt_used_entries += cnt_driver_entries;
2964         }
2965
2966         return cnt_used_entries;
2967 }
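
/*
 * Typical two-call pattern for the function above (editor's sketch;
 * dump_names() is a hypothetical consumer, error handling shortened):
 *
 *	int nb = rte_eth_xstats_get_names(port_id, NULL, 0);
 *	struct rte_eth_xstat_name *names = NULL;
 *
 *	if (nb > 0)
 *		names = malloc(sizeof(*names) * nb);
 *	if (names != NULL &&
 *	    rte_eth_xstats_get_names(port_id, names, nb) == nb)
 *		dump_names(names, nb);
 *	free(names);
 */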
2968
2969
2970 static int
2971 rte_eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
2972 {
2973         struct rte_eth_dev *dev;
2974         struct rte_eth_stats eth_stats;
2975         unsigned int count = 0, i, q;
2976         uint64_t val, *stats_ptr;
2977         uint16_t nb_rxqs, nb_txqs;
2978         int ret;
2979
2980         ret = rte_eth_stats_get(port_id, &eth_stats);
2981         if (ret < 0)
2982                 return ret;
2983
2984         dev = &rte_eth_devices[port_id];
2985
2986         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2987         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2988
2989         /* global stats */
2990         for (i = 0; i < RTE_NB_STATS; i++) {
2991                 stats_ptr = RTE_PTR_ADD(&eth_stats,
2992                                         rte_stats_strings[i].offset);
2993                 val = *stats_ptr;
2994                 xstats[count++].value = val;
2995         }
2996
2997         if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
2998                 return count;
2999
3000         /* per-rxq stats */
3001         for (q = 0; q < nb_rxqs; q++) {
3002                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
3003                         stats_ptr = RTE_PTR_ADD(&eth_stats,
3004                                         rte_rxq_stats_strings[i].offset +
3005                                         q * sizeof(uint64_t));
3006                         val = *stats_ptr;
3007                         xstats[count++].value = val;
3008                 }
3009         }
3010
3011         /* per-txq stats */
3012         for (q = 0; q < nb_txqs; q++) {
3013                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
3014                         stats_ptr = RTE_PTR_ADD(&eth_stats,
3015                                         rte_txq_stats_strings[i].offset +
3016                                         q * sizeof(uint64_t));
3017                         val = *stats_ptr;
3018                         xstats[count++].value = val;
3019                 }
3020         }
3021         return count;
3022 }
3023
3024 /* retrieve ethdev extended statistics, filtered by ID */
3025 int
3026 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
3027                          uint64_t *values, unsigned int size)
3028 {
3029         unsigned int no_basic_stat_requested = 1;
3030         unsigned int no_ext_stat_requested = 1;
3031         unsigned int num_xstats_filled;
3032         unsigned int basic_count;
3033         uint16_t expected_entries;
3034         struct rte_eth_dev *dev;
3035         unsigned int i;
3036         int ret;
3037
3038         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3039         ret = get_xstats_count(port_id);
3040         if (ret < 0)
3041                 return ret;
3042         expected_entries = (uint16_t)ret;
3043         struct rte_eth_xstat xstats[expected_entries];
3044         dev = &rte_eth_devices[port_id];
3045         basic_count = get_xstats_basic_count(dev);
3046
3047         /* Return max number of stats if no ids given */
3048         if (!ids) {
3049                 if (!values)
3050                         return expected_entries;
3051                 else if (size < expected_entries)
3052                         return expected_entries;
3053         }
3054
3055         if (ids && !values)
3056                 return -EINVAL;
3057
3058         if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
3060                 uint64_t ids_copy[size];
3061
3062                 for (i = 0; i < size; i++) {
3063                         if (ids[i] < basic_count) {
3064                                 no_basic_stat_requested = 0;
3065                                 break;
3066                         }
3067
3068                         /*
3069                          * Convert user-visible ids (basic + extended) to
3070                          * the extended-only ids known by the PMD.
3071                          */
3072                         ids_copy[i] = ids[i] - basic_count;
3073                 }
3074
3075                 if (no_basic_stat_requested)
3076                         return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
3077                                         values, size);
3078         }
3079
3080         if (ids) {
3081                 for (i = 0; i < size; i++) {
3082                         if (ids[i] >= basic_count) {
3083                                 no_ext_stat_requested = 0;
3084                                 break;
3085                         }
3086                 }
3087         }
3088
3089         /* Fill the xstats structure */
3090         if (ids && no_ext_stat_requested)
3091                 ret = rte_eth_basic_stats_get(port_id, xstats);
3092         else
3093                 ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
3094
3095         if (ret < 0)
3096                 return ret;
3097         num_xstats_filled = (unsigned int)ret;
3098
3099         /* Return all stats */
3100         if (!ids) {
3101                 for (i = 0; i < num_xstats_filled; i++)
3102                         values[i] = xstats[i].value;
3103                 return expected_entries;
3104         }
3105
3106         /* Filter stats */
3107         for (i = 0; i < size; i++) {
3108                 if (ids[i] >= expected_entries) {
3109                         RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
3110                         return -1;
3111                 }
3112                 values[i] = xstats[ids[i]].value;
3113         }
3114         return size;
3115 }
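
/*
 * Fetching a single counter by ID (editor's sketch; id 0 is
 * "rx_good_packets", the first entry of rte_stats_strings):
 *
 *	uint64_t id = 0;
 *	uint64_t value;
 *
 *	if (rte_eth_xstats_get_by_id(port_id, &id, &value, 1) == 1)
 *		printf("rx_good_packets=%" PRIu64 "\n", value);
 */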
3116
3117 int
3118 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
3119         unsigned int n)
3120 {
3121         struct rte_eth_dev *dev;
3122         unsigned int count = 0, i;
3123         signed int xcount = 0;
3124         uint16_t nb_rxqs, nb_txqs;
3125         int ret;
3126
3127         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3128
3129         dev = &rte_eth_devices[port_id];
3130
3131         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3132         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3133
3134         /* Return generic statistics */
3135         count = RTE_NB_STATS;
3136         if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS)
3137                 count += (nb_rxqs * RTE_NB_RXQ_STATS) + (nb_txqs * RTE_NB_TXQ_STATS);
3138
3139         /* implemented by the driver */
3140         if (dev->dev_ops->xstats_get != NULL) {
3141                 /* Retrieve the xstats from the driver, placing them
3142                  * after the generic stats at the end of the xstats array.
3143                  */
3144                 xcount = (*dev->dev_ops->xstats_get)(dev,
3145                                      xstats ? xstats + count : NULL,
3146                                      (n > count) ? n - count : 0);
3147
3148                 if (xcount < 0)
3149                         return eth_err(port_id, xcount);
3150         }
3151
3152         if (n < count + xcount || xstats == NULL)
3153                 return count + xcount;
3154
3155         /* now fill the xstats structure */
3156         ret = rte_eth_basic_stats_get(port_id, xstats);
3157         if (ret < 0)
3158                 return ret;
3159         count = ret;
3160
3161         for (i = 0; i < count; i++)
3162                 xstats[i].id = i;
3163         /* add an offset to driver-specific stats */
3164         for ( ; i < count + xcount; i++)
3165                 xstats[i].id += count;
3166
3167         return count + xcount;
3168 }
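
/*
 * The ids set above let callers match values to the names returned by
 * rte_eth_xstats_get_names(): xstats[i].id indexes the name array
 * (editor's sketch; nb obtained from a prior size query):
 *
 *	struct rte_eth_xstat xstats[nb];
 *	struct rte_eth_xstat_name names[nb];
 *	unsigned int i;
 *
 *	rte_eth_xstats_get_names(port_id, names, nb);
 *	rte_eth_xstats_get(port_id, xstats, nb);
 *	for (i = 0; i < nb; i++)
 *		printf("%s: %" PRIu64 "\n",
 *			names[xstats[i].id].name, xstats[i].value);
 */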
3169
3170 /* reset ethdev extended statistics */
3171 int
3172 rte_eth_xstats_reset(uint16_t port_id)
3173 {
3174         struct rte_eth_dev *dev;
3175
3176         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3177         dev = &rte_eth_devices[port_id];
3178
3179         /* implemented by the driver */
3180         if (dev->dev_ops->xstats_reset != NULL)
3181                 return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev));
3182
3183         /* fallback to default */
3184         return rte_eth_stats_reset(port_id);
3185 }
3186
3187 static int
3188 set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx,
3189                 uint8_t is_rx)
3190 {
3191         struct rte_eth_dev *dev;
3192
3193         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3194
3195         dev = &rte_eth_devices[port_id];
3196
3197         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
3198
3199         if (is_rx && (queue_id >= dev->data->nb_rx_queues))
3200                 return -EINVAL;
3201
3202         if (!is_rx && (queue_id >= dev->data->nb_tx_queues))
3203                 return -EINVAL;
3204
3205         if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
3206                 return -EINVAL;
3207
3208         return (*dev->dev_ops->queue_stats_mapping_set)
3209                         (dev, queue_id, stat_idx, is_rx);
3210 }
3211
3212
3213 int
3214 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
3215                 uint8_t stat_idx)
3216 {
3217         return eth_err(port_id, set_queue_stats_mapping(port_id, tx_queue_id,
3218                                                 stat_idx, STAT_QMAP_TX));
3219 }
3220
3221
3222 int
3223 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
3224                 uint8_t stat_idx)
3225 {
3226         return eth_err(port_id, set_queue_stats_mapping(port_id, rx_queue_id,
3227                                                 stat_idx, STAT_QMAP_RX));
3228 }
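
/*
 * On NICs with fewer stat counters than queues, several queues can be
 * aggregated into one of the RTE_ETHDEV_QUEUE_STAT_CNTRS counters
 * (editor's sketch): count Rx queues 0 and 1 together in counter 0.
 *
 *	rte_eth_dev_set_rx_queue_stats_mapping(port_id, 0, 0);
 *	rte_eth_dev_set_rx_queue_stats_mapping(port_id, 1, 0);
 */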
3229
3230 int
3231 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
3232 {
3233         struct rte_eth_dev *dev;
3234
3235         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3236         dev = &rte_eth_devices[port_id];
3237
3238         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
3239         return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
3240                                                         fw_version, fw_size));
3241 }
3242
3243 int
3244 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
3245 {
3246         struct rte_eth_dev *dev;
3247         const struct rte_eth_desc_lim lim = {
3248                 .nb_max = UINT16_MAX,
3249                 .nb_min = 0,
3250                 .nb_align = 1,
3251                 .nb_seg_max = UINT16_MAX,
3252                 .nb_mtu_seg_max = UINT16_MAX,
3253         };
3254         int diag;
3255
3256         /*
3257          * Init dev_info before the port_id check: a caller that ignores
3258          * the return status has no way to tell whether the get succeeded.
3259          */
3260         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3261         dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
3262
3263         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3264         dev = &rte_eth_devices[port_id];
3265
3266         dev_info->rx_desc_lim = lim;
3267         dev_info->tx_desc_lim = lim;
3268         dev_info->device = dev->device;
3269         dev_info->min_mtu = RTE_ETHER_MIN_MTU;
3270         dev_info->max_mtu = UINT16_MAX;
3271
3272         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
3273         diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info);
3274         if (diag != 0) {
3275                 /* Cleanup already filled in device information */
3276                 memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3277                 return eth_err(port_id, diag);
3278         }
3279
3280         /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */
3281         dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues,
3282                         RTE_MAX_QUEUES_PER_PORT);
3283         dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues,
3284                         RTE_MAX_QUEUES_PER_PORT);
3285
3286         dev_info->driver_name = dev->device->driver->name;
3287         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
3288         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
3289
3290         dev_info->dev_flags = &dev->data->dev_flags;
3291
3292         return 0;
3293 }
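
/*
 * dev_info is zeroed again on failure, so callers should still check
 * the return value before using it (editor's sketch):
 *
 *	struct rte_eth_dev_info dev_info;
 *
 *	if (rte_eth_dev_info_get(port_id, &dev_info) == 0)
 *		printf("driver=%s max_rxq=%u\n",
 *			dev_info.driver_name, dev_info.max_rx_queues);
 */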
3294
3295 int
3296 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3297                                  uint32_t *ptypes, int num)
3298 {
3299         int i, j;
3300         struct rte_eth_dev *dev;
3301         const uint32_t *all_ptypes;
3302
3303         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3304         dev = &rte_eth_devices[port_id];
3305         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
3306         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3307
3308         if (!all_ptypes)
3309                 return 0;
3310
3311         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
3312                 if (all_ptypes[i] & ptype_mask) {
3313                         if (j < num)
3314                                 ptypes[j] = all_ptypes[i];
3315                         j++;
3316                 }
3317
3318         return j;
3319 }
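
/*
 * Query-then-fill pattern, as with the xstats getters (editor's
 * sketch): num == 0 returns the number of matching ptypes without
 * writing any.
 *
 *	int n = rte_eth_dev_get_supported_ptypes(port_id,
 *			RTE_PTYPE_L4_MASK, NULL, 0);
 *
 *	if (n > 0) {
 *		uint32_t ptypes[n];
 *
 *		rte_eth_dev_get_supported_ptypes(port_id,
 *				RTE_PTYPE_L4_MASK, ptypes, n);
 *	}
 */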
3320
3321 int
3322 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3323                                  uint32_t *set_ptypes, unsigned int num)
3324 {
3325         const uint32_t valid_ptype_masks[] = {
3326                 RTE_PTYPE_L2_MASK,
3327                 RTE_PTYPE_L3_MASK,
3328                 RTE_PTYPE_L4_MASK,
3329                 RTE_PTYPE_TUNNEL_MASK,
3330                 RTE_PTYPE_INNER_L2_MASK,
3331                 RTE_PTYPE_INNER_L3_MASK,
3332                 RTE_PTYPE_INNER_L4_MASK,
3333         };
3334         const uint32_t *all_ptypes;
3335         struct rte_eth_dev *dev;
3336         uint32_t unused_mask;
3337         unsigned int i, j;
3338         int ret;
3339
3340         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3341         dev = &rte_eth_devices[port_id];
3342
3343         if (num > 0 && set_ptypes == NULL)
3344                 return -EINVAL;
3345
3346         if (*dev->dev_ops->dev_supported_ptypes_get == NULL ||
3347                         *dev->dev_ops->dev_ptypes_set == NULL) {
3348                 ret = 0;
3349                 goto ptype_unknown;
3350         }
3351
3352         if (ptype_mask == 0) {
3353                 ret = (*dev->dev_ops->dev_ptypes_set)(dev,
3354                                 ptype_mask);
3355                 goto ptype_unknown;
3356         }
3357
3358         unused_mask = ptype_mask;
3359         for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) {
3360                 uint32_t mask = ptype_mask & valid_ptype_masks[i];
3361                 if (mask && mask != valid_ptype_masks[i]) {
3362                         ret = -EINVAL;
3363                         goto ptype_unknown;
3364                 }
3365                 unused_mask &= ~valid_ptype_masks[i];
3366         }
3367
3368         if (unused_mask) {
3369                 ret = -EINVAL;
3370                 goto ptype_unknown;
3371         }
3372
3373         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3374         if (all_ptypes == NULL) {
3375                 ret = 0;
3376                 goto ptype_unknown;
3377         }
3378
3379         /*
3380          * Accommodate as many set_ptypes as possible. If the supplied
3381          * set_ptypes array is too small, fill it partially.
3382          */
3383         for (i = 0, j = 0; set_ptypes != NULL &&
3384                                 (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) {
3385                 if (ptype_mask & all_ptypes[i]) {
3386                         if (j + 1 < num) { /* avoid wrap when num == 0 */
3387                                 set_ptypes[j] = all_ptypes[i];
3388                                 j++;
3389                                 continue;
3390                         }
3391                         break;
3392                 }
3393         }
3394
3395         if (set_ptypes != NULL && j < num)
3396                 set_ptypes[j] = RTE_PTYPE_UNKNOWN;
3397
3398         return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask);
3399
3400 ptype_unknown:
3401         if (num > 0)
3402                 set_ptypes[0] = RTE_PTYPE_UNKNOWN;
3403
3404         return ret;
3405 }
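
/*
 * ptype_mask must be built from complete RTE_PTYPE_*_MASK groups; e.g.
 * to keep only L3/L4 classification (editor's sketch):
 *
 *	uint32_t set[16];
 *
 *	rte_eth_dev_set_ptypes(port_id,
 *		RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK, set, RTE_DIM(set));
 *
 * A ptype_mask of 0 tells the PMD the application needs no ptype
 * information at all.
 */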
3406
3407 int
3408 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
3409 {
3410         struct rte_eth_dev *dev;
3411
3412         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3413         dev = &rte_eth_devices[port_id];
3414         rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
3415
3416         return 0;
3417 }
3418
3419 int
3420 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
3421 {
3422         struct rte_eth_dev *dev;
3423
3424         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3425
3426         dev = &rte_eth_devices[port_id];
3427         *mtu = dev->data->mtu;
3428         return 0;
3429 }
3430
3431 int
3432 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
3433 {
3434         int ret;
3435         struct rte_eth_dev_info dev_info;
3436         struct rte_eth_dev *dev;
3437
3438         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3439         dev = &rte_eth_devices[port_id];
3440         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
3441
3442         /*
3443          * If the device does not support dev_infos_get, skip the
3444          * min_mtu/max_mtu validation here: it needs values that are
3445          * only populated by rte_eth_dev_info_get(), which in turn
3446          * relies on dev->dev_ops->dev_infos_get.
3447          */
3448         if (*dev->dev_ops->dev_infos_get != NULL) {
3449                 ret = rte_eth_dev_info_get(port_id, &dev_info);
3450                 if (ret != 0)
3451                         return ret;
3452
3453                 if (mtu < dev_info.min_mtu || mtu > dev_info.max_mtu)
3454                         return -EINVAL;
3455         }
3456
3457         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
3458         if (!ret)
3459                 dev->data->mtu = mtu;
3460
3461         return eth_err(port_id, ret);
3462 }
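
/*
 * Setting the MTU (editor's sketch; 9000 assumes the PMD reports a
 * max_mtu at least that large):
 *
 *	uint16_t mtu;
 *
 *	if (rte_eth_dev_set_mtu(port_id, 9000) == 0) {
 *		rte_eth_dev_get_mtu(port_id, &mtu);
 *		assert(mtu == 9000);
 *	}
 */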
3463
3464 int
3465 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
3466 {
3467         struct rte_eth_dev *dev;
3468         int ret;
3469
3470         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3471         dev = &rte_eth_devices[port_id];
3472         if (!(dev->data->dev_conf.rxmode.offloads &
3473               DEV_RX_OFFLOAD_VLAN_FILTER)) {
3474                 RTE_ETHDEV_LOG(ERR, "Port %u: vlan-filtering disabled\n",
3475                         port_id);
3476                 return -ENOSYS;
3477         }
3478
3479         if (vlan_id > 4095) {
3480                 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n",
3481                         port_id, vlan_id);
3482                 return -EINVAL;
3483         }
3484         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
3485
3486         ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
3487         if (ret == 0) {
3488                 struct rte_vlan_filter_conf *vfc;
3489                 int vidx;
3490                 int vbit;
3491
3492                 vfc = &dev->data->vlan_filter_conf;
3493                 vidx = vlan_id / 64;
3494                 vbit = vlan_id % 64;
3495
3496                 if (on)
3497                         vfc->ids[vidx] |= UINT64_C(1) << vbit;
3498                 else
3499                         vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
3500         }
3501
3502         return eth_err(port_id, ret);
3503 }
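
/*
 * DEV_RX_OFFLOAD_VLAN_FILTER must be enabled at configure time before
 * individual VLAN IDs can be programmed (editor's sketch):
 *
 *	rte_eth_dev_vlan_filter(port_id, 100, 1);	accept VLAN 100
 *	rte_eth_dev_vlan_filter(port_id, 100, 0);	and drop it again
 */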
3504
3505 int
3506 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3507                                     int on)
3508 {
3509         struct rte_eth_dev *dev;
3510
3511         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3512         dev = &rte_eth_devices[port_id];
3513         if (rx_queue_id >= dev->data->nb_rx_queues) {
3514                 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id);
3515                 return -EINVAL;
3516         }
3517
3518         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
3519         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
3520
3521         return 0;
3522 }
3523
3524 int
3525 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
3526                                 enum rte_vlan_type vlan_type,
3527                                 uint16_t tpid)
3528 {
3529         struct rte_eth_dev *dev;
3530
3531         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3532         dev = &rte_eth_devices[port_id];
3533         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
3534
3535         return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
3536                                                                tpid));
3537 }
3538
3539 int
3540 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
3541 {
3542         struct rte_eth_dev_info dev_info;
3543         struct rte_eth_dev *dev;
3544         int ret = 0;
3545         int mask = 0;
3546         int cur, org = 0;
3547         uint64_t orig_offloads;
3548         uint64_t dev_offloads;
3549         uint64_t new_offloads;
3550
3551         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3552         dev = &rte_eth_devices[port_id];
3553
3554         /* save original values in case of failure */
3555         orig_offloads = dev->data->dev_conf.rxmode.offloads;
3556         dev_offloads = orig_offloads;
3557
3558         /* check which option changed by application */
3559         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
3560         org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
3561         if (cur != org) {
3562                 if (cur)
3563                         dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
3564                 else
3565                         dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
3566                 mask |= ETH_VLAN_STRIP_MASK;
3567         }
3568
3569         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
3570         org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
3571         if (cur != org) {
3572                 if (cur)
3573                         dev_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3574                 else
3575                         dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
3576                 mask |= ETH_VLAN_FILTER_MASK;
3577         }
3578
3579         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
3580         org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND);
3581         if (cur != org) {
3582                 if (cur)
3583                         dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
3584                 else
3585                         dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
3586                 mask |= ETH_VLAN_EXTEND_MASK;
3587         }
3588
3589         cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD);
3590         org = !!(dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP);
3591         if (cur != org) {
3592                 if (cur)
3593                         dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
3594                 else
3595                         dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
3596                 mask |= ETH_QINQ_STRIP_MASK;
3597         }
3598
3599         /* no change */
3600         if (mask == 0)
3601                 return ret;
3602
3603         ret = rte_eth_dev_info_get(port_id, &dev_info);
3604         if (ret != 0)
3605                 return ret;
3606
3607         /* Rx VLAN offloading must be within its device capabilities */
3608         if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) {
3609                 new_offloads = dev_offloads & ~orig_offloads;
3610                 RTE_ETHDEV_LOG(ERR,
3611                         "Ethdev port_id=%u requested newly added VLAN offloads "
3612                         "0x%" PRIx64 " must be within Rx offloads capabilities "
3613                         "0x%" PRIx64 " in %s()\n",
3614                         port_id, new_offloads, dev_info.rx_offload_capa,
3615                         __func__);
3616                 return -EINVAL;
3617         }
3618
3619         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
3620         dev->data->dev_conf.rxmode.offloads = dev_offloads;
3621         ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
3622         if (ret) {
3623                 /* hit an error, restore original values */
3624                 dev->data->dev_conf.rxmode.offloads = orig_offloads;
3625         }
3626
3627         return eth_err(port_id, ret);
3628 }
3629
3630 int
3631 rte_eth_dev_get_vlan_offload(uint16_t port_id)
3632 {
3633         struct rte_eth_dev *dev;
3634         uint64_t *dev_offloads;
3635         int ret = 0;
3636
3637         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3638         dev = &rte_eth_devices[port_id];
3639         dev_offloads = &dev->data->dev_conf.rxmode.offloads;
3640
3641         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
3642                 ret |= ETH_VLAN_STRIP_OFFLOAD;
3643
3644         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
3645                 ret |= ETH_VLAN_FILTER_OFFLOAD;
3646
3647         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
3648                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
3649
3650         if (*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
3651                 ret |= ETH_QINQ_STRIP_OFFLOAD;
3652
3653         return ret;
3654 }
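
/*
 * Read-modify-write of the offload mask using the getter above together
 * with rte_eth_dev_set_vlan_offload() (editor's sketch):
 *
 *	int mask = rte_eth_dev_get_vlan_offload(port_id);
 *
 *	mask |= ETH_VLAN_STRIP_OFFLOAD;
 *	rte_eth_dev_set_vlan_offload(port_id, mask);
 */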
3655
3656 int
3657 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
3658 {
3659         struct rte_eth_dev *dev;
3660
3661         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3662         dev = &rte_eth_devices[port_id];
3663         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
3664
3665         return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
3666 }
3667
3668 int
3669 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3670 {
3671         struct rte_eth_dev *dev;
3672
3673         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3674         dev = &rte_eth_devices[port_id];
3675         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
3676         memset(fc_conf, 0, sizeof(*fc_conf));
3677         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
3678 }
3679
3680 int
3681 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3682 {
3683         struct rte_eth_dev *dev;
3684
3685         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3686         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
3687                 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n");
3688                 return -EINVAL;
3689         }
3690
3691         dev = &rte_eth_devices[port_id];
3692         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
3693         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
3694 }
3695
3696 int
3697 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
3698                                    struct rte_eth_pfc_conf *pfc_conf)
3699 {
3700         struct rte_eth_dev *dev;
3701
3702         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3703         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
3704                 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
3705                 return -EINVAL;
3706         }
3707
3708         dev = &rte_eth_devices[port_id];
3709         /* High water and low water validation are device-specific */
3710         if (*dev->dev_ops->priority_flow_ctrl_set)
3711                 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
3712                                         (dev, pfc_conf));
3713         return -ENOTSUP;
3714 }
3715
3716 static int
3717 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
3718                         uint16_t reta_size)
3719 {
3720         uint16_t i, num;
3721
3722         if (!reta_conf)
3723                 return -EINVAL;
3724
3725         num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
3726         for (i = 0; i < num; i++) {
3727                 if (reta_conf[i].mask)
3728                         return 0;
3729         }
3730
3731         return -EINVAL;
3732 }
3733
3734 static int
3735 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
3736                          uint16_t reta_size,
3737                          uint16_t max_rxq)
3738 {
3739         uint16_t i, idx, shift;
3740
3741         if (!reta_conf)
3742                 return -EINVAL;
3743
3744         if (max_rxq == 0) {
3745                 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n");
3746                 return -EINVAL;
3747         }
3748
3749         for (i = 0; i < reta_size; i++) {
3750                 idx = i / RTE_RETA_GROUP_SIZE;
3751                 shift = i % RTE_RETA_GROUP_SIZE;
3752                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
3753                         (reta_conf[idx].reta[shift] >= max_rxq)) {
3754                         RTE_ETHDEV_LOG(ERR,
3755                                 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n",
3756                                 idx, shift,
3757                                 reta_conf[idx].reta[shift], max_rxq);
3758                         return -EINVAL;
3759                 }
3760         }
3761
3762         return 0;
3763 }
3764
3765 int
3766 rte_eth_dev_rss_reta_update(uint16_t port_id,
3767                             struct rte_eth_rss_reta_entry64 *reta_conf,
3768                             uint16_t reta_size)
3769 {
3770         struct rte_eth_dev *dev;
3771         int ret;
3772
3773         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3774         /* Check mask bits */
3775         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
3776         if (ret < 0)
3777                 return ret;
3778
3779         dev = &rte_eth_devices[port_id];
3780
3781         /* Check entry value */
3782         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
3783                                 dev->data->nb_rx_queues);
3784         if (ret < 0)
3785                 return ret;
3786
3787         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
3788         return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
3789                                                              reta_size));
3790 }
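
/*
 * Each rte_eth_rss_reta_entry64 covers RTE_RETA_GROUP_SIZE entries and
 * its mask selects which of them to update. A round-robin fill over
 * nb_rxq queues (editor's sketch; assumes reta_size, taken from
 * dev_info.reta_size, is a multiple of RTE_RETA_GROUP_SIZE):
 *
 *	struct rte_eth_rss_reta_entry64 reta[reta_size / RTE_RETA_GROUP_SIZE];
 *	uint16_t i;
 *
 *	memset(reta, 0, sizeof(reta));
 *	for (i = 0; i < reta_size; i++) {
 *		reta[i / RTE_RETA_GROUP_SIZE].mask |=
 *			UINT64_C(1) << (i % RTE_RETA_GROUP_SIZE);
 *		reta[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *			i % nb_rxq;
 *	}
 *	rte_eth_dev_rss_reta_update(port_id, reta, reta_size);
 */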
3791
3792 int
3793 rte_eth_dev_rss_reta_query(uint16_t port_id,
3794                            struct rte_eth_rss_reta_entry64 *reta_conf,
3795                            uint16_t reta_size)
3796 {
3797         struct rte_eth_dev *dev;
3798         int ret;
3799
3800         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3801
3802         /* Check mask bits */
3803         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
3804         if (ret < 0)
3805                 return ret;
3806
3807         dev = &rte_eth_devices[port_id];
3808         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
3809         return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
3810                                                             reta_size));
3811 }
3812
3813 int
3814 rte_eth_dev_rss_hash_update(uint16_t port_id,
3815                             struct rte_eth_rss_conf *rss_conf)
3816 {
3817         struct rte_eth_dev *dev;
3818         struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
3819         int ret;
3820
3821         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3822
3823         ret = rte_eth_dev_info_get(port_id, &dev_info);
3824         if (ret != 0)
3825                 return ret;
3826
3827         rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf);
3828
3829         dev = &rte_eth_devices[port_id];
3830         if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
3831             dev_info.flow_type_rss_offloads) {
3832                 RTE_ETHDEV_LOG(ERR,
3833                         "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
3834                         port_id, rss_conf->rss_hf,
3835                         dev_info.flow_type_rss_offloads);
3836                 return -EINVAL;
3837         }
3838         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
3839         return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
3840                                                                  rss_conf));
3841 }
3842
3843 int
3844 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
3845                               struct rte_eth_rss_conf *rss_conf)
3846 {
3847         struct rte_eth_dev *dev;
3848
3849         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3850         dev = &rte_eth_devices[port_id];
3851         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
3852         return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
3853                                                                    rss_conf));
3854 }
3855
3856 int
3857 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
3858                                 struct rte_eth_udp_tunnel *udp_tunnel)
3859 {
3860         struct rte_eth_dev *dev;
3861
3862         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3863         if (udp_tunnel == NULL) {
3864                 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
3865                 return -EINVAL;
3866         }
3867
3868         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
3869                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
3870                 return -EINVAL;
3871         }
3872
3873         dev = &rte_eth_devices[port_id];
3874         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
3875         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
3876                                                                 udp_tunnel));
3877 }
3878
3879 int
3880 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
3881                                    struct rte_eth_udp_tunnel *udp_tunnel)
3882 {
3883         struct rte_eth_dev *dev;
3884
3885         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3886         dev = &rte_eth_devices[port_id];
3887
3888         if (udp_tunnel == NULL) {
3889                 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
3890                 return -EINVAL;
3891         }
3892
3893         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
3894                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
3895                 return -EINVAL;
3896         }
3897
3898         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
3899         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
3900                                                                 udp_tunnel));
3901 }
3902
3903 int
3904 rte_eth_led_on(uint16_t port_id)
3905 {
3906         struct rte_eth_dev *dev;
3907
3908         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3909         dev = &rte_eth_devices[port_id];
3910         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
3911         return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
3912 }
3913
3914 int
3915 rte_eth_led_off(uint16_t port_id)
3916 {
3917         struct rte_eth_dev *dev;
3918
3919         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3920         dev = &rte_eth_devices[port_id];
3921         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
3922         return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
3923 }
3924
3925 int
3926 rte_eth_fec_get_capability(uint16_t port_id,
3927                            struct rte_eth_fec_capa *speed_fec_capa,
3928                            unsigned int num)
3929 {
3930         struct rte_eth_dev *dev;
3931         int ret;
3932
3933         if (speed_fec_capa == NULL && num > 0)
3934                 return -EINVAL;
3935
3936         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3937         dev = &rte_eth_devices[port_id];
3938         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get_capability, -ENOTSUP);
3939         ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num);
3940
3941         return ret;
3942 }
3943
3944 int
3945 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa)
3946 {
3947         struct rte_eth_dev *dev;
3948
3949         if (fec_capa == NULL)
3950                 return -EINVAL;
3951
3952         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3953         dev = &rte_eth_devices[port_id];
3954         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get, -ENOTSUP);
3955         return eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa));
3956 }
3957
3958 int
3959 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa)
3960 {
3961         struct rte_eth_dev *dev;
3962
3963         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3964         dev = &rte_eth_devices[port_id];
3965         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_set, -ENOTSUP);
3966         return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa));
3967 }
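
/*
 * Query-then-set pattern for FEC (editor's sketch; assumes a fixed-speed
 * link so the first capability entry applies, and uses the
 * RTE_ETH_FEC_MODE_CAPA_MASK() helper from rte_ethdev.h):
 *
 *	struct rte_eth_fec_capa capa[8];
 *	int n = rte_eth_fec_get_capability(port_id, capa, RTE_DIM(capa));
 *
 *	if (n > 0 && (capa[0].capa & RTE_ETH_FEC_MODE_CAPA_MASK(RS)))
 *		rte_eth_fec_set(port_id, RTE_ETH_FEC_MODE_CAPA_MASK(RS));
 */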
3968
3969 /*
3970  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
3971  * an empty spot.
3972  */
3973 static int
3974 get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
3975 {
3976         struct rte_eth_dev_info dev_info;
3977         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3978         unsigned i;
3979         int ret;
3980
3981         ret = rte_eth_dev_info_get(port_id, &dev_info);
3982         if (ret != 0)
3983                 return -1;
3984
3985         for (i = 0; i < dev_info.max_mac_addrs; i++)
3986                 if (memcmp(addr, &dev->data->mac_addrs[i],
3987                                 RTE_ETHER_ADDR_LEN) == 0)
3988                         return i;
3989
3990         return -1;
3991 }
3992
3993 static const struct rte_ether_addr null_mac_addr;
3994
3995 int
3996 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
3997                         uint32_t pool)
3998 {
3999         struct rte_eth_dev *dev;
4000         int index;
4001         uint64_t pool_mask;
4002         int ret;
4003
4004         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4005         dev = &rte_eth_devices[port_id];
4006         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
4007
4008         if (rte_is_zero_ether_addr(addr)) {
4009                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
4010                         port_id);
4011                 return -EINVAL;
4012         }
4013         if (pool >= ETH_64_POOLS) {
4014                 RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", ETH_64_POOLS - 1);
4015                 return -EINVAL;
4016         }
4017
4018         index = get_mac_addr_index(port_id, addr);
4019         if (index < 0) {
4020                 index = get_mac_addr_index(port_id, &null_mac_addr);
4021                 if (index < 0) {
4022                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
4023                                 port_id);
4024                         return -ENOSPC;
4025                 }
4026         } else {
4027                 pool_mask = dev->data->mac_pool_sel[index];
4028
4029                 /* If both the MAC address and pool are already set, do nothing */
4030                 if (pool_mask & (1ULL << pool))
4031                         return 0;
4032         }
4033
4034         /* Update NIC */
4035         ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
4036
4037         if (ret == 0) {
4038                 /* Update address in NIC data structure */
4039                 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]);
4040
4041                 /* Update pool bitmap in NIC data structure */
4042                 dev->data->mac_pool_sel[index] |= (1ULL << pool);
4043         }
4044
4045         return eth_err(port_id, ret);
4046 }
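
/*
 * Adding a secondary unicast address to pool 0 (editor's sketch; the
 * address below is made up):
 *
 *	struct rte_ether_addr mac = {
 *		.addr_bytes = {0x02, 0x00, 0x00, 0x00, 0x00, 0x01} };
 *
 *	rte_eth_dev_mac_addr_add(port_id, &mac, 0);
 */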
4047
4048 int
4049 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr)
4050 {
4051         struct rte_eth_dev *dev;
4052         int index;
4053
4054         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4055         dev = &rte_eth_devices[port_id];
4056         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
4057
4058         index = get_mac_addr_index(port_id, addr);
4059         if (index == 0) {
4060                 RTE_ETHDEV_LOG(ERR,
4061                         "Port %u: Cannot remove default MAC address\n",
4062                         port_id);
4063                 return -EADDRINUSE;
4064         } else if (index < 0)
4065                 return 0;  /* Do nothing if address wasn't found */
4066
4067         /* Update NIC */
4068         (*dev->dev_ops->mac_addr_remove)(dev, index);
4069
4070         /* Update address in NIC data structure */
4071         rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
4072
4073         /* reset pool bitmap */
4074         dev->data->mac_pool_sel[index] = 0;
4075
4076         return 0;
4077 }
4078
4079 int
4080 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr)
4081 {
4082         struct rte_eth_dev *dev;
4083         int ret;
4084
4085         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4086
4087         if (!rte_is_valid_assigned_ether_addr(addr))
4088                 return -EINVAL;
4089
4090         dev = &rte_eth_devices[port_id];
4091         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
4092
4093         ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
4094         if (ret < 0)
4095                 return ret;
4096
4097         /* Update default address in NIC data structure */
4098         rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
4099
4100         return 0;
4101 }
4102
4103
4104 /*
4105  * Returns index into the hash MAC address array of addr.
4106  * Use 00:00:00:00:00:00 to find an empty spot.
4107  */
4108 static int
4109 get_hash_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
4110 {
4111         struct rte_eth_dev_info dev_info;
4112         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4113         unsigned i;
4114         int ret;
4115
4116         ret = rte_eth_dev_info_get(port_id, &dev_info);
4117         if (ret != 0)
4118                 return -1;
4119
4120         if (!dev->data->hash_mac_addrs)
4121                 return -1;
4122
4123         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
4124                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
4125                         RTE_ETHER_ADDR_LEN) == 0)
4126                         return i;
4127
4128         return -1;
4129 }
4130
4131 int
4132 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
4133                                 uint8_t on)
4134 {
4135         int index;
4136         int ret;
4137         struct rte_eth_dev *dev;
4138
4139         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4140
4141         dev = &rte_eth_devices[port_id];
4142         if (rte_is_zero_ether_addr(addr)) {
4143                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
4144                         port_id);
4145                 return -EINVAL;
4146         }
4147
4148         index = get_hash_mac_addr_index(port_id, addr);
4149         /* Check if it's already there, and do nothing */
4150         if ((index >= 0) && on)
4151                 return 0;
4152
4153         if (index < 0) {
4154                 if (!on) {
4155                         RTE_ETHDEV_LOG(ERR,
4156                                 "Port %u: the MAC address was not set in UTA\n",
4157                                 port_id);
4158                         return -EINVAL;
4159                 }
4160
4161                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
4162                 if (index < 0) {
4163                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
4164                                 port_id);
4165                         return -ENOSPC;
4166                 }
4167         }
4168
4169         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
4170         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
4171         if (ret == 0) {
4172                 /* Update address in NIC data structure */
4173                 if (on)
4174                         rte_ether_addr_copy(addr,
4175                                         &dev->data->hash_mac_addrs[index]);
4176                 else
4177                         rte_ether_addr_copy(&null_mac_addr,
4178                                         &dev->data->hash_mac_addrs[index]);
4179         }
4180
4181         return eth_err(port_id, ret);
4182 }
4183
4184 int
4185 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
4186 {
4187         struct rte_eth_dev *dev;
4188
4189         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4190
4191         dev = &rte_eth_devices[port_id];
4192
4193         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
4194         return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
4195                                                                        on));
4196 }
4197
4198 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
4199                                         uint16_t tx_rate)
4200 {
4201         struct rte_eth_dev *dev;
4202         struct rte_eth_dev_info dev_info;
4203         struct rte_eth_link link;
4204         int ret;
4205
4206         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4207
4208         ret = rte_eth_dev_info_get(port_id, &dev_info);
4209         if (ret != 0)
4210                 return ret;
4211
4212         dev = &rte_eth_devices[port_id];
4213         link = dev->data->dev_link;
4214
4215         if (queue_idx >= dev_info.max_tx_queues) {
4216                 RTE_ETHDEV_LOG(ERR,
4217                         "Set queue rate limit: port %u: invalid queue id=%u\n",
4218                         port_id, queue_idx);
4219                 return -EINVAL;
4220         }
4221
4222         if (tx_rate > link.link_speed) {
4223                 RTE_ETHDEV_LOG(ERR,
4224                         "Set queue rate limit: invalid tx_rate=%u, bigger than link speed=%u\n",
4225                         tx_rate, link.link_speed);
4226                 return -EINVAL;
4227         }
4228
4229         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
4230         return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
4231                                                         queue_idx, tx_rate));
4232 }
4233
4234 int
4235 rte_eth_mirror_rule_set(uint16_t port_id,
4236                         struct rte_eth_mirror_conf *mirror_conf,
4237                         uint8_t rule_id, uint8_t on)
4238 {
4239         struct rte_eth_dev *dev;
4240
4241         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4242         if (mirror_conf->rule_type == 0) {
4243                 RTE_ETHDEV_LOG(ERR, "Mirror rule type cannot be 0\n");
4244                 return -EINVAL;
4245         }
4246
4247         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
4248                 RTE_ETHDEV_LOG(ERR, "Invalid dst pool, pool id must be 0-%d\n",
4249                         ETH_64_POOLS - 1);
4250                 return -EINVAL;
4251         }
4252
4253         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
4254              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
4255             (mirror_conf->pool_mask == 0)) {
4256                 RTE_ETHDEV_LOG(ERR,
4257                         "Invalid mirror pool, pool mask cannot be 0\n");
4258                 return -EINVAL;
4259         }
4260
4261         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
4262             mirror_conf->vlan.vlan_mask == 0) {
4263                 RTE_ETHDEV_LOG(ERR,
4264                         "Invalid vlan mask, vlan mask cannot be 0\n");
4265                 return -EINVAL;
4266         }
4267
4268         dev = &rte_eth_devices[port_id];
4269         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
4270
4271         return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
4272                                                 mirror_conf, rule_id, on));
4273 }
4274
4275 int
4276 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
4277 {
4278         struct rte_eth_dev *dev;
4279
4280         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4281
4282         dev = &rte_eth_devices[port_id];
4283         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
4284
4285         return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev,
4286                                                                    rule_id));
4287 }
4288
4289 RTE_INIT(eth_dev_init_cb_lists)
4290 {
4291         int i;
4292
4293         for (i = 0; i < RTE_MAX_ETHPORTS; i++)
4294                 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
4295 }
4296
4297 int
4298 rte_eth_dev_callback_register(uint16_t port_id,
4299                         enum rte_eth_event_type event,
4300                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4301 {
4302         struct rte_eth_dev *dev;
4303         struct rte_eth_dev_callback *user_cb;
4304         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
4305         uint16_t last_port;
4306
4307         if (!cb_fn)
4308                 return -EINVAL;
4309
4310         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4311                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
4312                 return -EINVAL;
4313         }
4314
4315         if (port_id == RTE_ETH_ALL) {
4316                 next_port = 0;
4317                 last_port = RTE_MAX_ETHPORTS - 1;
4318         } else {
4319                 next_port = last_port = port_id;
4320         }
4321
4322         rte_spinlock_lock(&rte_eth_dev_cb_lock);
4323
4324         do {
4325                 dev = &rte_eth_devices[next_port];
4326
4327                 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
4328                         if (user_cb->cb_fn == cb_fn &&
4329                                 user_cb->cb_arg == cb_arg &&
4330                                 user_cb->event == event) {
4331                                 break;
4332                         }
4333                 }
4334
4335                 /* create a new callback. */
4336                 if (user_cb == NULL) {
4337                         user_cb = rte_zmalloc("INTR_USER_CALLBACK",
4338                                 sizeof(struct rte_eth_dev_callback), 0);
4339                         if (user_cb != NULL) {
4340                                 user_cb->cb_fn = cb_fn;
4341                                 user_cb->cb_arg = cb_arg;
4342                                 user_cb->event = event;
4343                                 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
4344                                                   user_cb, next);
4345                         } else {
4346                                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
4347                                 rte_eth_dev_callback_unregister(port_id, event,
4348                                                                 cb_fn, cb_arg);
4349                                 return -ENOMEM;
4350                         }
4351
4352                 }
4353         } while (++next_port <= last_port);
4354
4355         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
4356         return 0;
4357 }
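
/*
 * Registering a link-status callback on all ports (editor's sketch; the
 * handler name is hypothetical):
 *
 *	static int
 *	on_link_change(uint16_t port_id, enum rte_eth_event_type event,
 *			void *cb_arg, void *ret_param)
 *	{
 *		RTE_SET_USED(event);
 *		RTE_SET_USED(cb_arg);
 *		RTE_SET_USED(ret_param);
 *		printf("port %u: link state changed\n", port_id);
 *		return 0;
 *	}
 *
 *	rte_eth_dev_callback_register(RTE_ETH_ALL,
 *			RTE_ETH_EVENT_INTR_LSC, on_link_change, NULL);
 */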
4358
4359 int
4360 rte_eth_dev_callback_unregister(uint16_t port_id,
4361                         enum rte_eth_event_type event,
4362                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4363 {
4364         int ret;
4365         struct rte_eth_dev *dev;
4366         struct rte_eth_dev_callback *cb, *next;
4367         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
4368         uint16_t last_port;
4369
4370         if (!cb_fn)
4371                 return -EINVAL;
4372
4373         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4374                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
4375                 return -EINVAL;
4376         }
4377
4378         if (port_id == RTE_ETH_ALL) {
4379                 next_port = 0;
4380                 last_port = RTE_MAX_ETHPORTS - 1;
4381         } else {
4382                 next_port = last_port = port_id;
4383         }
4384
4385         rte_spinlock_lock(&rte_eth_dev_cb_lock);
4386
4387         do {
4388                 dev = &rte_eth_devices[next_port];
4389                 ret = 0;
4390                 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
4391                      cb = next) {
4392
4393                         next = TAILQ_NEXT(cb, next);
4394
4395                         if (cb->cb_fn != cb_fn || cb->event != event ||
4396                             (cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
4397                                 continue;
4398
4399                         /*
4400                          * if this callback is not executing right now,
4401                          * then remove it.
4402                          */
4403                         if (cb->active == 0) {
4404                                 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
4405                                 rte_free(cb);
4406                         } else {
4407                                 ret = -EAGAIN;
4408                         }
4409                 }
4410         } while (++next_port <= last_port);
4411
4412         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
4413         return ret;
4414 }
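
/*
 * Illustrative note: passing (void *)-1 as cb_arg acts as a wildcard
 * and removes every registration of cb_fn for the event regardless of
 * the argument it was registered with. A -EAGAIN return means at least
 * one matching callback was executing and was left in place; a caller
 * that must guarantee removal can retry, e.g.:
 *
 *	while (rte_eth_dev_callback_unregister(port_id,
 *			RTE_ETH_EVENT_INTR_LSC, lsc_event_cb,
 *			(void *)-1) == -EAGAIN)
 *		rte_delay_ms(1);
 */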
4415
4416 int
4417 rte_eth_dev_callback_process(struct rte_eth_dev *dev,
4418         enum rte_eth_event_type event, void *ret_param)
4419 {
4420         struct rte_eth_dev_callback *cb_lst;
4421         struct rte_eth_dev_callback dev_cb;
4422         int rc = 0;
4423
4424         rte_spinlock_lock(&rte_eth_dev_cb_lock);
4425         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
4426                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
4427                         continue;
4428                 dev_cb = *cb_lst;
4429                 cb_lst->active = 1;
4430                 if (ret_param != NULL)
4431                         dev_cb.ret_param = ret_param;
4432
4433                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
4434                 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
4435                                 dev_cb.cb_arg, dev_cb.ret_param);
4436                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
4437                 cb_lst->active = 0;
4438         }
4439         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
4440         return rc;
4441 }
4442
4443 void
4444 rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
4445 {
4446         if (dev == NULL)
4447                 return;
4448
4449         rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);
4450
4451         dev->state = RTE_ETH_DEV_ATTACHED;
4452 }
4453
4454 int
4455 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
4456 {
4457         uint32_t vec;
4458         struct rte_eth_dev *dev;
4459         struct rte_intr_handle *intr_handle;
4460         uint16_t qid;
4461         int rc;
4462
4463         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4464
4465         dev = &rte_eth_devices[port_id];
4466
4467         if (!dev->intr_handle) {
4468                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4469                 return -ENOTSUP;
4470         }
4471
4472         intr_handle = dev->intr_handle;
4473         if (!intr_handle->intr_vec) {
4474                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4475                 return -EPERM;
4476         }
4477
4478         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
4479                 vec = intr_handle->intr_vec[qid];
4480                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4481                 if (rc && rc != -EEXIST) {
4482                         RTE_ETHDEV_LOG(ERR,
4483                                 "p %u q %u rx ctl error op %d epfd %d vec %u\n",
4484                                 port_id, qid, op, epfd, vec);
4485                 }
4486         }
4487
4488         return 0;
4489 }
4490
4491 int
4492 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
4493 {
4494         struct rte_intr_handle *intr_handle;
4495         struct rte_eth_dev *dev;
4496         unsigned int efd_idx;
4497         uint32_t vec;
4498         int fd;
4499
4500         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
4501
4502         dev = &rte_eth_devices[port_id];
4503
4504         if (queue_id >= dev->data->nb_rx_queues) {
4505                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4506                 return -1;
4507         }
4508
4509         if (!dev->intr_handle) {
4510                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4511                 return -1;
4512         }
4513
4514         intr_handle = dev->intr_handle;
4515         if (!intr_handle->intr_vec) {
4516                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4517                 return -1;
4518         }
4519
4520         vec = intr_handle->intr_vec[queue_id];
4521         efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
4522                 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
4523         fd = intr_handle->efds[efd_idx];
4524
4525         return fd;
4526 }
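
/*
 * Usage sketch (assumes the application runs its own Linux epoll loop
 * instead of DPDK's interrupt helpers): fetch the event fd backing a
 * Rx queue interrupt and arm it in an application-owned epoll set.
 * "app_epfd" is a hypothetical descriptor created with epoll_create1().
 *
 *	#include <sys/epoll.h>
 *
 *	struct epoll_event ev = { .events = EPOLLIN };
 *	int fd = rte_eth_dev_rx_intr_ctl_q_get_fd(port_id, queue_id);
 *
 *	if (fd >= 0)
 *		epoll_ctl(app_epfd, EPOLL_CTL_ADD, fd, &ev);
 */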
4527
4528 static inline int
4529 eth_dma_mzone_name(char *name, size_t len, uint16_t port_id, uint16_t queue_id,
4530                 const char *ring_name)
4531 {
4532         return snprintf(name, len, "eth_p%d_q%d_%s",
4533                         port_id, queue_id, ring_name);
4534 }
4535
4536 const struct rte_memzone *
4537 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
4538                          uint16_t queue_id, size_t size, unsigned align,
4539                          int socket_id)
4540 {
4541         char z_name[RTE_MEMZONE_NAMESIZE];
4542         const struct rte_memzone *mz;
4543         int rc;
4544
4545         rc = eth_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
4546                         queue_id, ring_name);
4547         if (rc >= RTE_MEMZONE_NAMESIZE) {
4548                 RTE_ETHDEV_LOG(ERR, "ring name too long\n");
4549                 rte_errno = ENAMETOOLONG;
4550                 return NULL;
4551         }
4552
4553         mz = rte_memzone_lookup(z_name);
4554         if (mz) {
4555                 if ((socket_id != SOCKET_ID_ANY && socket_id != mz->socket_id) ||
4556                                 size > mz->len ||
4557                                 ((uintptr_t)mz->addr & (align - 1)) != 0) {
4558                         RTE_ETHDEV_LOG(ERR,
4559                                 "memzone %s does not match the requested attributes\n",
4560                                 mz->name);
4561                         return NULL;
4562                 }
4563
4564                 return mz;
4565         }
4566
4567         return rte_memzone_reserve_aligned(z_name, size, socket_id,
4568                         RTE_MEMZONE_IOVA_CONTIG, align);
4569 }
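
/*
 * Usage sketch from a PMD's queue setup path (names illustrative):
 * look up or create the descriptor ring memzone for Tx queue "idx".
 * Because an existing zone with matching attributes is returned as-is,
 * a device reconfiguration can safely call this again for the same
 * queue without leaking memory.
 *
 *	const struct rte_memzone *mz;
 *
 *	mz = rte_eth_dma_zone_reserve(dev, "tx_ring", idx, ring_size,
 *			RTE_CACHE_LINE_SIZE, socket_id);
 *	if (mz == NULL)
 *		return -ENOMEM;
 */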
4570
4571 int
4572 rte_eth_dma_zone_free(const struct rte_eth_dev *dev, const char *ring_name,
4573                 uint16_t queue_id)
4574 {
4575         char z_name[RTE_MEMZONE_NAMESIZE];
4576         const struct rte_memzone *mz;
4577         int rc = 0;
4578
4579         rc = eth_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
4580                         queue_id, ring_name);
4581         if (rc >= RTE_MEMZONE_NAMESIZE) {
4582                 RTE_ETHDEV_LOG(ERR, "ring name too long\n");
4583                 return -ENAMETOOLONG;
4584         }
4585
4586         mz = rte_memzone_lookup(z_name);
4587         if (mz)
4588                 rc = rte_memzone_free(mz);
4589         else
4590                 rc = -ENOENT;
4591
4592         return rc;
4593 }
4594
4595 int
4596 rte_eth_dev_create(struct rte_device *device, const char *name,
4597         size_t priv_data_size,
4598         ethdev_bus_specific_init ethdev_bus_specific_init,
4599         void *bus_init_params,
4600         ethdev_init_t ethdev_init, void *init_params)
4601 {
4602         struct rte_eth_dev *ethdev;
4603         int retval;
4604
4605         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL);
4606
4607         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
4608                 ethdev = rte_eth_dev_allocate(name);
4609                 if (!ethdev)
4610                         return -ENODEV;
4611
4612                 if (priv_data_size) {
4613                         ethdev->data->dev_private = rte_zmalloc_socket(
4614                                 name, priv_data_size, RTE_CACHE_LINE_SIZE,
4615                                 device->numa_node);
4616
4617                         if (!ethdev->data->dev_private) {
4618                                 RTE_ETHDEV_LOG(ERR,
4619                                         "failed to allocate private data\n");
4620                                 retval = -ENOMEM;
4621                                 goto probe_failed;
4622                         }
4623                 }
4624         } else {
4625                 ethdev = rte_eth_dev_attach_secondary(name);
4626                 if (!ethdev) {
4627                         RTE_ETHDEV_LOG(ERR,
4628                                 "secondary process attach failed, ethdev doesn't exist\n");
4629                         return -ENODEV;
4630                 }
4631         }
4632
4633         ethdev->device = device;
4634
4635         if (ethdev_bus_specific_init) {
4636                 retval = ethdev_bus_specific_init(ethdev, bus_init_params);
4637                 if (retval) {
4638                         RTE_ETHDEV_LOG(ERR,
4639                                 "ethdev bus specific initialisation failed\n");
4640                         goto probe_failed;
4641                 }
4642         }
4643
4644         retval = ethdev_init(ethdev, init_params);
4645         if (retval) {
4646                 RTE_ETHDEV_LOG(ERR, "ethdev initialisation failed\n");
4647                 goto probe_failed;
4648         }
4649
4650         rte_eth_dev_probing_finish(ethdev);
4651
4652         return retval;
4653
4654 probe_failed:
4655         rte_eth_dev_release_port(ethdev);
4656         return retval;
4657 }
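
/*
 * Usage sketch of this creation helper from a bus probe routine; the
 * driver name, private struct and init callback are hypothetical. The
 * helper allocates the port (primary process) or attaches to it
 * (secondary), then hands it to the driver-supplied init function:
 *
 *	static int
 *	mydrv_ethdev_init(struct rte_eth_dev *ethdev, void *init_params)
 *	{
 *		struct mydrv_priv *priv = ethdev->data->dev_private;
 *
 *		RTE_SET_USED(init_params);
 *		priv->port_id = ethdev->data->port_id;
 *		return 0;
 *	}
 *
 *	ret = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
 *			sizeof(struct mydrv_priv), NULL, NULL,
 *			mydrv_ethdev_init, NULL);
 */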
4658
4659 int
4660 rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
4661         ethdev_uninit_t ethdev_uninit)
4662 {
4663         int ret;
4664
4665         ethdev = rte_eth_dev_allocated(ethdev->data->name);
4666         if (!ethdev)
4667                 return -ENODEV;
4668
4669         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL);
4670
4671         ret = ethdev_uninit(ethdev);
4672         if (ret)
4673                 return ret;
4674
4675         return rte_eth_dev_release_port(ethdev);
4676 }
4677
4678 int
4679 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
4680                           int epfd, int op, void *data)
4681 {
4682         uint32_t vec;
4683         struct rte_eth_dev *dev;
4684         struct rte_intr_handle *intr_handle;
4685         int rc;
4686
4687         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4688
4689         dev = &rte_eth_devices[port_id];
4690         if (queue_id >= dev->data->nb_rx_queues) {
4691                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4692                 return -EINVAL;
4693         }
4694
4695         if (!dev->intr_handle) {
4696                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4697                 return -ENOTSUP;
4698         }
4699
4700         intr_handle = dev->intr_handle;
4701         if (!intr_handle->intr_vec) {
4702                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4703                 return -EPERM;
4704         }
4705
4706         vec = intr_handle->intr_vec[queue_id];
4707         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4708         if (rc && rc != -EEXIST) {
4709                 RTE_ETHDEV_LOG(ERR,
4710                         "p %u q %u rx ctl error op %d epfd %d vec %u\n",
4711                         port_id, queue_id, op, epfd, vec);
4712                 return rc;
4713         }
4714
4715         return 0;
4716 }
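
/*
 * Usage sketch following the l3fwd-power pattern: bind the queue's
 * interrupt to the calling lcore's implicit epoll instance, then block
 * in rte_epoll_wait() whenever the queue goes idle.
 *
 *	struct rte_epoll_event event[1];
 *
 *	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id,
 *			RTE_EPOLL_PER_THREAD, RTE_INTR_EVENT_ADD, NULL);
 *	rte_epoll_wait(RTE_EPOLL_PER_THREAD, event, 1, timeout_ms);
 */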
4717
4718 int
4719 rte_eth_dev_rx_intr_enable(uint16_t port_id,
4720                            uint16_t queue_id)
4721 {
4722         struct rte_eth_dev *dev;
4723         int ret;
4724
4725         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4726
4727         dev = &rte_eth_devices[port_id];
4728
4729         ret = eth_dev_validate_rx_queue(dev, queue_id);
4730         if (ret != 0)
4731                 return ret;
4732
4733         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
4734         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev,
4735                                                                 queue_id));
4736 }
4737
4738 int
4739 rte_eth_dev_rx_intr_disable(uint16_t port_id,
4740                             uint16_t queue_id)
4741 {
4742         struct rte_eth_dev *dev;
4743         int ret;
4744
4745         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4746
4747         dev = &rte_eth_devices[port_id];
4748
4749         ret = eth_dev_validate_rx_queue(dev, queue_id);
4750         if (ret != 0)
4751                 return ret;
4752
4753         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
4754         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev,
4755                                                                 queue_id));
4756 }
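
/*
 * The enable/disable pair is typically wrapped around the sleep itself
 * so a packet arriving just before the wait is not missed (sketch,
 * continuing the epoll example above; one extra poll after enabling
 * closes the race window):
 *
 *	rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *	if (rte_eth_rx_burst(port_id, queue_id, pkts, BURST_SIZE) == 0)
 *		rte_epoll_wait(RTE_EPOLL_PER_THREAD, event, 1, -1);
 *	rte_eth_dev_rx_intr_disable(port_id, queue_id);
 */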
4757
4759 int
4760 rte_eth_dev_filter_supported(uint16_t port_id,
4761                              enum rte_filter_type filter_type)
4762 {
4763         struct rte_eth_dev *dev;
4764
4765         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4766
4767         dev = &rte_eth_devices[port_id];
4768         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
4769         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
4770                                 RTE_ETH_FILTER_NOP, NULL);
4771 }
4772
4773 int
4774 rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
4775                         enum rte_filter_op filter_op, void *arg)
4776 {
4777         struct rte_eth_dev *dev;
4778
4779         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4780
4781         dev = &rte_eth_devices[port_id];
4782         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
4783         return eth_err(port_id, (*dev->dev_ops->filter_ctrl)(dev, filter_type,
4784                                                              filter_op, arg));
4785 }
4786
4787 const struct rte_eth_rxtx_callback *
4788 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
4789                 rte_rx_callback_fn fn, void *user_param)
4790 {
4791 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4792         rte_errno = ENOTSUP;
4793         return NULL;
4794 #endif
4795         struct rte_eth_dev *dev;
4796
4797         /* check input parameters */
4798         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4799                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4800                 rte_errno = EINVAL;
4801                 return NULL;
4802         }
4803         dev = &rte_eth_devices[port_id];
4804         if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
4805                 rte_errno = EINVAL;
4806                 return NULL;
4807         }
4808         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4809
4810         if (cb == NULL) {
4811                 rte_errno = ENOMEM;
4812                 return NULL;
4813         }
4814
4815         cb->fn.rx = fn;
4816         cb->param = user_param;
4817
4818         rte_spinlock_lock(&rte_eth_rx_cb_lock);
4819         /* Add the callbacks in fifo order. */
4820         struct rte_eth_rxtx_callback *tail =
4821                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
4822
4823         if (!tail) {
4824                 /* Stores to cb->fn and cb->param should complete before
4825                  * cb is visible to data plane.
4826                  */
4827                 __atomic_store_n(
4828                         &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
4829                         cb, __ATOMIC_RELEASE);
4830
4831         } else {
4832                 while (tail->next)
4833                         tail = tail->next;
4834                 /* Stores to cb->fn and cb->param should complete before
4835                  * cb is visible to data plane.
4836                  */
4837                 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
4838         }
4839         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
4840
4841         return cb;
4842 }
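
/*
 * Usage sketch: a post-Rx callback that counts received packets; the
 * names are illustrative. The callback runs inside rte_eth_rx_burst()
 * on the data plane thread, so it must not block.
 *
 *	static uint16_t
 *	count_rx_cb(uint16_t port_id, uint16_t queue,
 *			struct rte_mbuf *pkts[], uint16_t nb_pkts,
 *			uint16_t max_pkts, void *user_param)
 *	{
 *		uint64_t *cnt = user_param;
 *
 *		RTE_SET_USED(port_id);
 *		RTE_SET_USED(queue);
 *		RTE_SET_USED(pkts);
 *		RTE_SET_USED(max_pkts);
 *		*cnt += nb_pkts;
 *		return nb_pkts;
 *	}
 *
 *	cb = rte_eth_add_rx_callback(port_id, queue_id, count_rx_cb, &cnt);
 */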
4843
4844 const struct rte_eth_rxtx_callback *
4845 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
4846                 rte_rx_callback_fn fn, void *user_param)
4847 {
4848 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4849         rte_errno = ENOTSUP;
4850         return NULL;
4851 #endif
4852         /* check input parameters */
4853         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4854                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4855                 rte_errno = EINVAL;
4856                 return NULL;
4857         }
4858
4859         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4860
4861         if (cb == NULL) {
4862                 rte_errno = ENOMEM;
4863                 return NULL;
4864         }
4865
4866         cb->fn.rx = fn;
4867         cb->param = user_param;
4868
4869         rte_spinlock_lock(&rte_eth_rx_cb_lock);
4870         /* Add the callbacks at first position */
4871         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
4872         /* Stores to cb->fn, cb->param and cb->next should complete before
4873          * cb is visible to data plane threads.
4874          */
4875         __atomic_store_n(
4876                 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
4877                 cb, __ATOMIC_RELEASE);
4878         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
4879
4880         return cb;
4881 }
4882
4883 const struct rte_eth_rxtx_callback *
4884 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
4885                 rte_tx_callback_fn fn, void *user_param)
4886 {
4887 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4888         rte_errno = ENOTSUP;
4889         return NULL;
4890 #endif
4891         struct rte_eth_dev *dev;
4892
4893         /* check input parameters */
4894         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4895                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
4896                 rte_errno = EINVAL;
4897                 return NULL;
4898         }
4899
4900         dev = &rte_eth_devices[port_id];
4901         if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
4902                 rte_errno = EINVAL;
4903                 return NULL;
4904         }
4905
4906         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4907
4908         if (cb == NULL) {
4909                 rte_errno = ENOMEM;
4910                 return NULL;
4911         }
4912
4913         cb->fn.tx = fn;
4914         cb->param = user_param;
4915
4916         rte_spinlock_lock(&rte_eth_tx_cb_lock);
4917         /* Add the callbacks in fifo order. */
4918         struct rte_eth_rxtx_callback *tail =
4919                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
4920
4921         if (!tail) {
4922                 /* Stores to cb->fn and cb->param should complete before
4923                  * cb is visible to data plane.
4924                  */
4925                 __atomic_store_n(
4926                         &rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id],
4927                         cb, __ATOMIC_RELEASE);
4928
4929         } else {
4930                 while (tail->next)
4931                         tail = tail->next;
4932                 /* Stores to cb->fn and cb->param should complete before
4933                  * cb is visible to data plane.
4934                  */
4935                 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
4936         }
4937         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
4938
4939         return cb;
4940 }
4941
4942 int
4943 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
4944                 const struct rte_eth_rxtx_callback *user_cb)
4945 {
4946 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4947         return -ENOTSUP;
4948 #endif
4949         /* Check input parameters. */
4950         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
4951         if (user_cb == NULL ||
4952                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
4953                 return -EINVAL;
4954
4955         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4956         struct rte_eth_rxtx_callback *cb;
4957         struct rte_eth_rxtx_callback **prev_cb;
4958         int ret = -EINVAL;
4959
4960         rte_spinlock_lock(&rte_eth_rx_cb_lock);
4961         prev_cb = &dev->post_rx_burst_cbs[queue_id];
4962         for (; *prev_cb != NULL; prev_cb = &cb->next) {
4963                 cb = *prev_cb;
4964                 if (cb == user_cb) {
4965                         /* Remove the user cb from the callback list. */
4966                         __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
4967                         ret = 0;
4968                         break;
4969                 }
4970         }
4971         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
4972
4973         return ret;
4974 }
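
/*
 * Note on reclamation (applies to the Tx variant below as well):
 * removal only unlinks the callback, it never frees it, because a data
 * plane thread may still be executing it. The caller owns the final
 * rte_free() and must first ensure no lcore remains inside the
 * callback; the fixed delay here is a crude illustrative stand-in for
 * a real quiescence mechanism:
 *
 *	rte_eth_remove_rx_callback(port_id, queue_id, cb);
 *	rte_delay_ms(grace_ms);
 *	rte_free((void *)(uintptr_t)cb);
 */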
4975
4976 int
4977 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
4978                 const struct rte_eth_rxtx_callback *user_cb)
4979 {
4980 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4981         return -ENOTSUP;
4982 #endif
4983         /* Check input parameters. */
4984         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
4985         if (user_cb == NULL ||
4986                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
4987                 return -EINVAL;
4988
4989         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4990         int ret = -EINVAL;
4991         struct rte_eth_rxtx_callback *cb;
4992         struct rte_eth_rxtx_callback **prev_cb;
4993
4994         rte_spinlock_lock(&rte_eth_tx_cb_lock);
4995         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
4996         for (; *prev_cb != NULL; prev_cb = &cb->next) {
4997                 cb = *prev_cb;
4998                 if (cb == user_cb) {
4999                         /* Remove the user cb from the callback list. */
5000                         __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
5001                         ret = 0;
5002                         break;
5003                 }
5004         }
5005         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
5006
5007         return ret;
5008 }
5009
5010 int
5011 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5012         struct rte_eth_rxq_info *qinfo)
5013 {
5014         struct rte_eth_dev *dev;
5015
5016         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5017
5018         if (qinfo == NULL)
5019                 return -EINVAL;
5020
5021         dev = &rte_eth_devices[port_id];
5022         if (queue_id >= dev->data->nb_rx_queues) {
5023                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
5024                 return -EINVAL;
5025         }
5026
5027         if (dev->data->rx_queues == NULL ||
5028                         dev->data->rx_queues[queue_id] == NULL) {
5029                 RTE_ETHDEV_LOG(ERR,
5030                                "Rx queue %"PRIu16" of device with port_id=%"
5031                                PRIu16" has not been setup\n",
5032                                queue_id, port_id);
5033                 return -EINVAL;
5034         }
5035
5036         if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
5037                 RTE_ETHDEV_LOG(INFO,
5038                         "Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
5039                         queue_id, port_id);
5040                 return -EINVAL;
5041         }
5042
5043         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
5044
5045         memset(qinfo, 0, sizeof(*qinfo));
5046         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
5047         return 0;
5048 }
5049
5050 int
5051 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5052         struct rte_eth_txq_info *qinfo)
5053 {
5054         struct rte_eth_dev *dev;
5055
5056         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5057
5058         if (qinfo == NULL)
5059                 return -EINVAL;
5060
5061         dev = &rte_eth_devices[port_id];
5062         if (queue_id >= dev->data->nb_tx_queues) {
5063                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
5064                 return -EINVAL;
5065         }
5066
5067         if (dev->data->tx_queues == NULL ||
5068                         dev->data->tx_queues[queue_id] == NULL) {
5069                 RTE_ETHDEV_LOG(ERR,
5070                                "Tx queue %"PRIu16" of device with port_id=%"
5071                                PRIu16" has not been setup\n",
5072                                queue_id, port_id);
5073                 return -EINVAL;
5074         }
5075
5076         if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
5077                 RTE_ETHDEV_LOG(INFO,
5078                         "Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
5079                         queue_id, port_id);
5080                 return -EINVAL;
5081         }
5082
5083         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
5084
5085         memset(qinfo, 0, sizeof(*qinfo));
5086         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
5087
5088         return 0;
5089 }
5090
5091 int
5092 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5093                           struct rte_eth_burst_mode *mode)
5094 {
5095         struct rte_eth_dev *dev;
5096
5097         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5098
5099         if (mode == NULL)
5100                 return -EINVAL;
5101
5102         dev = &rte_eth_devices[port_id];
5103
5104         if (queue_id >= dev->data->nb_rx_queues) {
5105                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
5106                 return -EINVAL;
5107         }
5108
5109         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP);
5110         memset(mode, 0, sizeof(*mode));
5111         return eth_err(port_id,
5112                        dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode));
5113 }
5114
5115 int
5116 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5117                           struct rte_eth_burst_mode *mode)
5118 {
5119         struct rte_eth_dev *dev;
5120
5121         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5122
5123         if (mode == NULL)
5124                 return -EINVAL;
5125
5126         dev = &rte_eth_devices[port_id];
5127
5128         if (queue_id >= dev->data->nb_tx_queues) {
5129                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
5130                 return -EINVAL;
5131         }
5132
5133         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP);
5134         memset(mode, 0, sizeof(*mode));
5135         return eth_err(port_id,
5136                        dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode));
5137 }
5138
5139 int
5140 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
5141                              struct rte_ether_addr *mc_addr_set,
5142                              uint32_t nb_mc_addr)
5143 {
5144         struct rte_eth_dev *dev;
5145
5146         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5147
5148         dev = &rte_eth_devices[port_id];
5149         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
5150         return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
5151                                                 mc_addr_set, nb_mc_addr));
5152 }
5153
5154 int
5155 rte_eth_timesync_enable(uint16_t port_id)
5156 {
5157         struct rte_eth_dev *dev;
5158
5159         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5160         dev = &rte_eth_devices[port_id];
5161
5162         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
5163         return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
5164 }
5165
5166 int
5167 rte_eth_timesync_disable(uint16_t port_id)
5168 {
5169         struct rte_eth_dev *dev;
5170
5171         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5172         dev = &rte_eth_devices[port_id];
5173
5174         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
5175         return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
5176 }
5177
5178 int
5179 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
5180                                    uint32_t flags)
5181 {
5182         struct rte_eth_dev *dev;
5183
5184         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5185         dev = &rte_eth_devices[port_id];
5186
5187         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
5188         return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
5189                                 (dev, timestamp, flags));
5190 }
5191
5192 int
5193 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
5194                                    struct timespec *timestamp)
5195 {
5196         struct rte_eth_dev *dev;
5197
5198         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5199         dev = &rte_eth_devices[port_id];
5200
5201         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
5202         return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
5203                                 (dev, timestamp));
5204 }
5205
5206 int
5207 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
5208 {
5209         struct rte_eth_dev *dev;
5210
5211         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5212         dev = &rte_eth_devices[port_id];
5213
5214         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
5215         return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev,
5216                                                                       delta));
5217 }
5218
5219 int
5220 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
5221 {
5222         struct rte_eth_dev *dev;
5223
5224         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5225         dev = &rte_eth_devices[port_id];
5226
5227         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
5228         return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
5229                                                                 timestamp));
5230 }
5231
5232 int
5233 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
5234 {
5235         struct rte_eth_dev *dev;
5236
5237         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5238         dev = &rte_eth_devices[port_id];
5239
5240         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
5241         return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
5242                                                                 timestamp));
5243 }
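
/*
 * Usage sketch of the timesync group for a PTP-style correction,
 * assuming the port's PMD implements the ops; "offset_ns" stands for a
 * correction the application computed from timestamp exchanges:
 *
 *	struct timespec ts;
 *
 *	rte_eth_timesync_enable(port_id);
 *	rte_eth_timesync_read_time(port_id, &ts);
 *	rte_eth_timesync_adjust_time(port_id, offset_ns);
 */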
5244
5245 int
5246 rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
5247 {
5248         struct rte_eth_dev *dev;
5249
5250         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5251         dev = &rte_eth_devices[port_id];
5252
5253         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP);
5254         return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock));
5255 }
5256
5257 int
5258 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
5259 {
5260         struct rte_eth_dev *dev;
5261
5262         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5263
5264         dev = &rte_eth_devices[port_id];
5265         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
5266         return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
5267 }
5268
5269 int
5270 rte_eth_dev_get_eeprom_length(uint16_t port_id)
5271 {
5272         struct rte_eth_dev *dev;
5273
5274         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5275
5276         dev = &rte_eth_devices[port_id];
5277         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
5278         return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
5279 }
5280
5281 int
5282 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
5283 {
5284         struct rte_eth_dev *dev;
5285
5286         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5287
5288         dev = &rte_eth_devices[port_id];
5289         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
5290         return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
5291 }
5292
5293 int
5294 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
5295 {
5296         struct rte_eth_dev *dev;
5297
5298         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5299
5300         dev = &rte_eth_devices[port_id];
5301         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
5302         return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
5303 }
5304
5305 int
5306 rte_eth_dev_get_module_info(uint16_t port_id,
5307                             struct rte_eth_dev_module_info *modinfo)
5308 {
5309         struct rte_eth_dev *dev;
5310
5311         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5312
5313         dev = &rte_eth_devices[port_id];
5314         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP);
5315         return (*dev->dev_ops->get_module_info)(dev, modinfo);
5316 }
5317
5318 int
5319 rte_eth_dev_get_module_eeprom(uint16_t port_id,
5320                               struct rte_dev_eeprom_info *info)
5321 {
5322         struct rte_eth_dev *dev;
5323
5324         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5325
5326         dev = &rte_eth_devices[port_id];
5327         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP);
5328         return (*dev->dev_ops->get_module_eeprom)(dev, info);
5329 }
5330
5331 int
5332 rte_eth_dev_get_dcb_info(uint16_t port_id,
5333                              struct rte_eth_dcb_info *dcb_info)
5334 {
5335         struct rte_eth_dev *dev;
5336
5337         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5338
5339         dev = &rte_eth_devices[port_id];
5340         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
5341
5342         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
5343         return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
5344 }
5345
5346 int
5347 rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
5348                                     struct rte_eth_l2_tunnel_conf *l2_tunnel)
5349 {
5350         struct rte_eth_dev *dev;
5351
5352         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5353         if (l2_tunnel == NULL) {
5354                 RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n");
5355                 return -EINVAL;
5356         }
5357
5358         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
5359                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
5360                 return -EINVAL;
5361         }
5362
5363         dev = &rte_eth_devices[port_id];
5364         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
5365                                 -ENOTSUP);
5366         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev,
5367                                                                 l2_tunnel));
5368 }
5369
5370 int
5371 rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
5372                                   struct rte_eth_l2_tunnel_conf *l2_tunnel,
5373                                   uint32_t mask,
5374                                   uint8_t en)
5375 {
5376         struct rte_eth_dev *dev;
5377
5378         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5379
5380         if (l2_tunnel == NULL) {
5381                 RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n");
5382                 return -EINVAL;
5383         }
5384
5385         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
5386                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
5387                 return -EINVAL;
5388         }
5389
5390         if (mask == 0) {
5391                 RTE_ETHDEV_LOG(ERR, "Mask must be non-zero\n");
5392                 return -EINVAL;
5393         }
5394
5395         dev = &rte_eth_devices[port_id];
5396         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
5397                                 -ENOTSUP);
5398         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_offload_set)(dev,
5399                                                         l2_tunnel, mask, en));
5400 }
5401
5402 static void
5403 rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc,
5404                            const struct rte_eth_desc_lim *desc_lim)
5405 {
5406         if (desc_lim->nb_align != 0)
5407                 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
5408
5409         if (desc_lim->nb_max != 0)
5410                 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
5411
5412         *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
5413 }
5414
5415 int
5416 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
5417                                  uint16_t *nb_rx_desc,
5418                                  uint16_t *nb_tx_desc)
5419 {
5420         struct rte_eth_dev_info dev_info;
5421         int ret;
5422
5423         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5424
5425         ret = rte_eth_dev_info_get(port_id, &dev_info);
5426         if (ret != 0)
5427                 return ret;
5428
5429         if (nb_rx_desc != NULL)
5430                 rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
5431
5432         if (nb_tx_desc != NULL)
5433                 rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
5434
5435         return 0;
5436 }
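
/*
 * Usage sketch: clamp application-requested ring sizes to the device
 * limits before queue setup so the setup calls do not fail on
 * descriptor-count constraints:
 *
 *	uint16_t nb_rxd = 1024, nb_txd = 1024;
 *
 *	rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
 *	rte_eth_rx_queue_setup(port_id, 0, nb_rxd, socket_id, NULL, mp);
 *	rte_eth_tx_queue_setup(port_id, 0, nb_txd, socket_id, NULL);
 */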
5437
5438 int
5439 rte_eth_dev_hairpin_capability_get(uint16_t port_id,
5440                                    struct rte_eth_hairpin_cap *cap)
5441 {
5442         struct rte_eth_dev *dev;
5443
5444         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
5445
5446         dev = &rte_eth_devices[port_id];
5447         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP);
5448         memset(cap, 0, sizeof(*cap));
5449         return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap));
5450 }
5451
5452 int
5453 rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
5454 {
5455         if (dev->data->rx_queue_state[queue_id] ==
5456             RTE_ETH_QUEUE_STATE_HAIRPIN)
5457                 return 1;
5458         return 0;
5459 }
5460
5461 int
5462 rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
5463 {
5464         if (dev->data->tx_queue_state[queue_id] ==
5465             RTE_ETH_QUEUE_STATE_HAIRPIN)
5466                 return 1;
5467         return 0;
5468 }
5469
5470 int
5471 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
5472 {
5473         struct rte_eth_dev *dev;
5474
5475         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5476
5477         if (pool == NULL)
5478                 return -EINVAL;
5479
5480         dev = &rte_eth_devices[port_id];
5481
5482         if (*dev->dev_ops->pool_ops_supported == NULL)
5483                 return 1; /* all pools are supported */
5484
5485         return (*dev->dev_ops->pool_ops_supported)(dev, pool);
5486 }
5487
5488 /**
5489  * A set of values to describe the possible states of a switch domain.
5490  */
5491 enum rte_eth_switch_domain_state {
5492         RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
5493         RTE_ETH_SWITCH_DOMAIN_ALLOCATED
5494 };
5495
5496 /**
5497  * Array of switch domains available for allocation. Array is sized to
5498  * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
5499  * ethdev ports in a single process.
5500  */
5501 static struct rte_eth_dev_switch {
5502         enum rte_eth_switch_domain_state state;
5503 } rte_eth_switch_domains[RTE_MAX_ETHPORTS];
5504
5505 int
5506 rte_eth_switch_domain_alloc(uint16_t *domain_id)
5507 {
5508         unsigned int i;
5509
5510         *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
5511
5512         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
5513                 if (rte_eth_switch_domains[i].state ==
5514                         RTE_ETH_SWITCH_DOMAIN_UNUSED) {
5515                         rte_eth_switch_domains[i].state =
5516                                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
5517                         *domain_id = i;
5518                         return 0;
5519                 }
5520         }
5521
5522         return -ENOSPC;
5523 }
5524
5525 int
5526 rte_eth_switch_domain_free(uint16_t domain_id)
5527 {
5528         if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
5529                 domain_id >= RTE_MAX_ETHPORTS)
5530                 return -EINVAL;
5531
5532         if (rte_eth_switch_domains[domain_id].state !=
5533                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
5534                 return -EINVAL;
5535
5536         rte_eth_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;
5537
5538         return 0;
5539 }
5540
5541 static int
5542 rte_eth_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
5543 {
5544         int state;
5545         struct rte_kvargs_pair *pair;
5546         char *letter;
5547
5548         arglist->str = strdup(str_in);
5549         if (arglist->str == NULL)
5550                 return -ENOMEM;
5551
5552         letter = arglist->str;
5553         state = 0;
5554         arglist->count = 0;
5555         pair = &arglist->pairs[0];
5556         while (1) {
5557                 switch (state) {
5558                 case 0: /* Initial */
5559                         if (*letter == '=')
5560                                 return -EINVAL;
5561                         else if (*letter == '\0')
5562                                 return 0;
5563
5564                         state = 1;
5565                         pair->key = letter;
5566                         /* fall-thru */
5567
5568                 case 1: /* Parsing key */
5569                         if (*letter == '=') {
5570                                 *letter = '\0';
5571                                 pair->value = letter + 1;
5572                                 state = 2;
5573                         } else if (*letter == ',' || *letter == '\0')
5574                                 return -EINVAL;
5575                         break;
5576
5578                 case 2: /* Parsing value */
5579                         if (*letter == '[')
5580                                 state = 3;
5581                         else if (*letter == ',') {
5582                                 *letter = '\0';
5583                                 arglist->count++;
5584                                 pair = &arglist->pairs[arglist->count];
5585                                 state = 0;
5586                         } else if (*letter == '\0') {
5587                                 letter--;
5588                                 arglist->count++;
5589                                 pair = &arglist->pairs[arglist->count];
5590                                 state = 0;
5591                         }
5592                         break;
5593
5594                 case 3: /* Parsing list */
5595                         if (*letter == ']')
5596                                 state = 2;
5597                         else if (*letter == '\0')
5598                                 return -EINVAL;
5599                         break;
5600                 }
5601                 letter++;
5602         }
5603 }
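
/*
 * Tokenisation example: "representor=[0,2-4],foo=bar" yields the two
 * pairs ("representor", "[0,2-4]") and ("foo", "bar"); a bracketed
 * value is kept intact so rte_eth_devargs_parse() below can hand it to
 * the list parser.
 */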
5604
5605 int
5606 rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
5607 {
5608         struct rte_kvargs args;
5609         struct rte_kvargs_pair *pair;
5610         unsigned int i;
5611         int result = 0;
5612
5613         memset(eth_da, 0, sizeof(*eth_da));
5614
5615         result = rte_eth_devargs_tokenise(&args, dargs);
5616         if (result < 0)
5617                 goto parse_cleanup;
5618
5619         for (i = 0; i < args.count; i++) {
5620                 pair = &args.pairs[i];
5621                 if (strcmp("representor", pair->key) == 0) {
5622                         result = rte_eth_devargs_parse_list(pair->value,
5623                                 rte_eth_devargs_parse_representor_ports,
5624                                 eth_da);
5625                         if (result < 0)
5626                                 goto parse_cleanup;
5627                 }
5628         }
5629
5630 parse_cleanup:
5631         if (args.str)
5632                 free(args.str);
5633
5634         return result;
5635 }
5636
5637 static int
5638 handle_port_list(const char *cmd __rte_unused,
5639                 const char *params __rte_unused,
5640                 struct rte_tel_data *d)
5641 {
5642         int port_id;
5643
5644         rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
5645         RTE_ETH_FOREACH_DEV(port_id)
5646                 rte_tel_data_add_array_int(d, port_id);
5647         return 0;
5648 }
5649
5650 static void
5651 add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats,
5652                 const char *stat_name)
5653 {
5654         int q;
5655         struct rte_tel_data *q_data = rte_tel_data_alloc();
             if (q_data == NULL)
                     return;
5656         rte_tel_data_start_array(q_data, RTE_TEL_U64_VAL);
5657         for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++)
5658                 rte_tel_data_add_array_u64(q_data, q_stats[q]);
5659         rte_tel_data_add_dict_container(d, stat_name, q_data, 0);
5660 }
5661
5662 #define ADD_DICT_STAT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s)
5663
5664 static int
5665 handle_port_stats(const char *cmd __rte_unused,
5666                 const char *params,
5667                 struct rte_tel_data *d)
5668 {
5669         struct rte_eth_stats stats;
5670         int port_id, ret;
5671
5672         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5673                 return -1;
5674
5675         port_id = atoi(params);
5676         if (!rte_eth_dev_is_valid_port(port_id))
5677                 return -1;
5678
5679         ret = rte_eth_stats_get(port_id, &stats);
5680         if (ret < 0)
5681                 return -1;
5682
5683         rte_tel_data_start_dict(d);
5684         ADD_DICT_STAT(stats, ipackets);
5685         ADD_DICT_STAT(stats, opackets);
5686         ADD_DICT_STAT(stats, ibytes);
5687         ADD_DICT_STAT(stats, obytes);
5688         ADD_DICT_STAT(stats, imissed);
5689         ADD_DICT_STAT(stats, ierrors);
5690         ADD_DICT_STAT(stats, oerrors);
5691         ADD_DICT_STAT(stats, rx_nombuf);
5692         add_port_queue_stats(d, stats.q_ipackets, "q_ipackets");
5693         add_port_queue_stats(d, stats.q_opackets, "q_opackets");
5694         add_port_queue_stats(d, stats.q_ibytes, "q_ibytes");
5695         add_port_queue_stats(d, stats.q_obytes, "q_obytes");
5696         add_port_queue_stats(d, stats.q_errors, "q_errors");
5697
5698         return 0;
5699 }
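
/*
 * Example exchange (response abridged): a telemetry client sending
 * "/ethdev/stats,0" receives a dict of the common stats plus the
 * per-queue arrays, e.g.
 *
 *	{"/ethdev/stats": {"ipackets": 0, "opackets": 0, ...,
 *		"q_ipackets": [0, 0, ...]}}
 */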
5700
5701 static int
5702 handle_port_xstats(const char *cmd __rte_unused,
5703                 const char *params,
5704                 struct rte_tel_data *d)
5705 {
5706         struct rte_eth_xstat *eth_xstats;
5707         struct rte_eth_xstat_name *xstat_names;
5708         int port_id, num_xstats;
5709         int i, ret;
5710         char *end_param;
5711
5712         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5713                 return -1;
5714
5715         port_id = strtoul(params, &end_param, 0);
5716         if (*end_param != '\0')
5717                 RTE_ETHDEV_LOG(NOTICE,
5718                         "Extra parameters passed to ethdev telemetry command, ignoring\n");
5719         if (!rte_eth_dev_is_valid_port(port_id))
5720                 return -1;
5721
5722         num_xstats = rte_eth_xstats_get(port_id, NULL, 0);
5723         if (num_xstats < 0)
5724                 return -1;
5725
5726         /* use one malloc for both names and stats */
5727         eth_xstats = malloc((sizeof(struct rte_eth_xstat) +
5728                         sizeof(struct rte_eth_xstat_name)) * num_xstats);
5729         if (eth_xstats == NULL)
5730                 return -1;
5731         xstat_names = (void *)&eth_xstats[num_xstats];
5732
5733         ret = rte_eth_xstats_get_names(port_id, xstat_names, num_xstats);
5734         if (ret < 0 || ret > num_xstats) {
5735                 free(eth_xstats);
5736                 return -1;
5737         }
5738
5739         ret = rte_eth_xstats_get(port_id, eth_xstats, num_xstats);
5740         if (ret < 0 || ret > num_xstats) {
5741                 free(eth_xstats);
5742                 return -1;
5743         }
5744
5745         rte_tel_data_start_dict(d);
5746         for (i = 0; i < num_xstats; i++)
5747                 rte_tel_data_add_dict_u64(d, xstat_names[i].name,
5748                                 eth_xstats[i].value);
5749         return 0;
5750 }
5751
5752 static int
5753 handle_port_link_status(const char *cmd __rte_unused,
5754                 const char *params,
5755                 struct rte_tel_data *d)
5756 {
5757         static const char *status_str = "status";
5758         int ret, port_id;
5759         struct rte_eth_link link;
5760         char *end_param;
5761
5762         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5763                 return -1;
5764
5765         port_id = strtoul(params, &end_param, 0);
5766         if (*end_param != '\0')
5767                 RTE_ETHDEV_LOG(NOTICE,
5768                         "Extra parameters passed to ethdev telemetry command, ignoring\n");
5769         if (!rte_eth_dev_is_valid_port(port_id))
5770                 return -1;
5771
5772         ret = rte_eth_link_get(port_id, &link);
5773         if (ret < 0)
5774                 return -1;
5775
5776         rte_tel_data_start_dict(d);
5777         if (!link.link_status) {
5778                 rte_tel_data_add_dict_string(d, status_str, "DOWN");
5779                 return 0;
5780         }
5781         rte_tel_data_add_dict_string(d, status_str, "UP");
5782         rte_tel_data_add_dict_u64(d, "speed", link.link_speed);
5783         rte_tel_data_add_dict_string(d, "duplex",
5784                         (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
5785                                 "full-duplex" : "half-duplex");
5786         return 0;
5787 }
5788
5789 int
5790 rte_eth_hairpin_queue_peer_update(uint16_t peer_port, uint16_t peer_queue,
5791                                   struct rte_hairpin_peer_info *cur_info,
5792                                   struct rte_hairpin_peer_info *peer_info,
5793                                   uint32_t direction)
5794 {
5795         struct rte_eth_dev *dev;
5796
5797         /* Current queue info is optional; peer queue info is mandatory. */
5798         if (peer_info == NULL)
5799                 return -EINVAL;
5800
5801         /* No need to check the validity again. */
5802         dev = &rte_eth_devices[peer_port];
5803         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_update,
5804                                 -ENOTSUP);
5805
5806         return (*dev->dev_ops->hairpin_queue_peer_update)(dev, peer_queue,
5807                                         cur_info, peer_info, direction);
5808 }
5809
5810 int
5811 rte_eth_hairpin_queue_peer_bind(uint16_t cur_port, uint16_t cur_queue,
5812                                 struct rte_hairpin_peer_info *peer_info,
5813                                 uint32_t direction)
5814 {
5815         struct rte_eth_dev *dev;
5816
5817         if (peer_info == NULL)
5818                 return -EINVAL;
5819
5820         /* No need to check the validity again. */
5821         dev = &rte_eth_devices[cur_port];
5822         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_bind,
5823                                 -ENOTSUP);
5824
5825         return (*dev->dev_ops->hairpin_queue_peer_bind)(dev, cur_queue,
5826                                                         peer_info, direction);
5827 }
5828
5829 int
5830 rte_eth_hairpin_queue_peer_unbind(uint16_t cur_port, uint16_t cur_queue,
5831                                   uint32_t direction)
5832 {
5833         struct rte_eth_dev *dev;
5834
5835         /* No need to check the validity again. */
5836         dev = &rte_eth_devices[cur_port];
5837         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_unbind,
5838                                 -ENOTSUP);
5839
5840         return (*dev->dev_ops->hairpin_queue_peer_unbind)(dev, cur_queue,
5841                                                           direction);
5842 }
5843
5844 RTE_LOG_REGISTER(rte_eth_dev_logtype, lib.ethdev, INFO);
5845
5846 RTE_INIT(ethdev_init_telemetry)
5847 {
5848         rte_telemetry_register_cmd("/ethdev/list", handle_port_list,
5849                         "Returns list of available ethdev ports. Takes no parameters");
5850         rte_telemetry_register_cmd("/ethdev/stats", handle_port_stats,
5851                         "Returns the common stats for a port. Parameters: int port_id");
5852         rte_telemetry_register_cmd("/ethdev/xstats", handle_port_xstats,
5853                         "Returns the extended stats for a port. Parameters: int port_id");
5854         rte_telemetry_register_cmd("/ethdev/link_status",
5855                         handle_port_link_status,
5856                         "Returns the link status for a port. Parameters: int port_id");
5857 }
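
/*
 * These endpoints are served over the runtime telemetry socket; a
 * typical interactive session uses the bundled client:
 *
 *	$ ./usertools/dpdk-telemetry.py
 *	--> /ethdev/list
 *	{"/ethdev/list": [0, 1]}
 */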