ethdev: allow close function to return an error
lib/librte_ethdev/rte_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_kvargs.h>
#include <rte_class.h>
#include <rte_ether.h>
#include <rte_telemetry.h>

#include "rte_ethdev_trace.h"
#include "rte_ethdev.h"
#include "rte_ethdev_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove rx callbacks */
static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove tx callbacks */
static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for shared data allocation */
static rte_spinlock_t rte_eth_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* Store statistics names and their offsets in the stats structure. */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

/* Shared memory between primary and secondary processes. */
static struct {
	uint64_t next_owner_id;
	rte_spinlock_t ownership_lock;
	struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
} *rte_eth_dev_shared_data;

static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
		rx_nombuf)},
};

#define RTE_NB_STATS RTE_DIM(rte_stats_strings)

static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS RTE_DIM(rte_rxq_stats_strings)

static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS RTE_DIM(rte_txq_stats_strings)

#define RTE_RX_OFFLOAD_BIT2STR(_name)	\
	{ DEV_RX_OFFLOAD_##_name, #_name }

#define RTE_ETH_RX_OFFLOAD_BIT2STR(_name)	\
	{ RTE_ETH_RX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} rte_rx_offload_names[] = {
	RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
	RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
	RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
	RTE_RX_OFFLOAD_BIT2STR(SCATTER),
	RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
	RTE_RX_OFFLOAD_BIT2STR(SECURITY),
	RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
	RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
	RTE_ETH_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
};

#undef RTE_RX_OFFLOAD_BIT2STR
#undef RTE_ETH_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name)	\
	{ DEV_TX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} rte_tx_offload_names[] = {
	RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
	RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
	RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
	RTE_TX_OFFLOAD_BIT2STR(SECURITY),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
};

#undef RTE_TX_OFFLOAD_BIT2STR

/**
 * The user application callback description.
 *
 * It contains the callback address to be registered by the user
 * application, a pointer to the callback parameters, and the event type.
 */
struct rte_eth_dev_callback {
	TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
	rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
	void *cb_arg;                           /**< Parameter for callback */
	void *ret_param;                        /**< Return parameter */
	enum rte_eth_event_type event;          /**< Interrupt event type */
	uint32_t active;                        /**< Callback is executing */
};

enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};

int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
	int ret;
	struct rte_devargs devargs = {.args = NULL};
	const char *bus_param_key;
	char *bus_str = NULL;
	char *cls_str = NULL;
	int str_size;

	memset(iter, 0, sizeof(*iter));

	/*
	 * The devargs string may use various syntaxes:
	 *   - 0000:08:00.0,representor=[1-3]
	 *   - pci:0000:06:00.0,representor=[0,5]
	 *   - class=eth,mac=00:11:22:33:44:55
	 * A new syntax is in development (not yet supported):
	 *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
	 */

	/*
	 * Handle a pure class filter (i.e. without any bus-level argument),
	 * from the future new syntax.
	 * rte_devargs_parse() does not yet support the new syntax, which is
	 * why this simple case is temporarily parsed here.
	 */
#define iter_anybus_str "class=eth,"
	if (strncmp(devargs_str, iter_anybus_str,
			strlen(iter_anybus_str)) == 0) {
		iter->cls_str = devargs_str + strlen(iter_anybus_str);
		goto end;
	}

	/* Split bus, device and parameters. */
	ret = rte_devargs_parse(&devargs, devargs_str);
	if (ret != 0)
		goto error;

	/*
	 * Assume parameters of the old syntax can match only at the ethdev
	 * level. Extra parameters will be ignored, thanks to the "+" prefix.
	 */
	str_size = strlen(devargs.args) + 2;
	cls_str = malloc(str_size);
	if (cls_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(cls_str, str_size, "+%s", devargs.args);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->cls_str = cls_str;
	free(devargs.args); /* allocated by rte_devargs_parse() */
	devargs.args = NULL;

	iter->bus = devargs.bus;
	if (iter->bus->dev_iterate == NULL) {
		ret = -ENOTSUP;
		goto error;
	}

	/* Convert bus args to new syntax for use with new API dev_iterate. */
	if (strcmp(iter->bus->name, "vdev") == 0) {
		bus_param_key = "name";
	} else if (strcmp(iter->bus->name, "pci") == 0) {
		bus_param_key = "addr";
	} else {
		ret = -ENOTSUP;
		goto error;
	}
	str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
	bus_str = malloc(str_size);
	if (bus_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(bus_str, str_size, "%s=%s",
			bus_param_key, devargs.name);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->bus_str = bus_str;

end:
	iter->cls = rte_class_find_by_name("eth");
	return 0;

error:
	if (ret == -ENOTSUP)
		RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n",
				iter->bus->name);
	free(devargs.args);
	free(bus_str);
	free(cls_str);
	return ret;
}

uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
	if (iter->cls == NULL) /* invalid ethdev iterator */
		return RTE_MAX_ETHPORTS;

	do { /* loop to try all matching rte_device */
		/* If not pure ethdev filter and */
		if (iter->bus != NULL &&
				/* not in middle of rte_eth_dev iteration, */
				iter->class_device == NULL) {
			/* get next rte_device to try. */
			iter->device = iter->bus->dev_iterate(
					iter->device, iter->bus_str, iter);
			if (iter->device == NULL)
				break; /* no more rte_device candidate */
		}
		/* The device matches the bus part; now check the ethdev part. */
		iter->class_device = iter->cls->dev_iterate(
				iter->class_device, iter->cls_str, iter);
		if (iter->class_device != NULL)
			return eth_dev_to_id(iter->class_device); /* match */
	} while (iter->bus != NULL); /* need to try next rte_device */

	/* No more ethdev ports to iterate. */
	rte_eth_iterator_cleanup(iter);
	return RTE_MAX_ETHPORTS;
}

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
	if (iter->bus_str == NULL)
		return; /* nothing to free in pure class filter */
	free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
	free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
	memset(iter, 0, sizeof(*iter));
}

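/*
 * Minimal usage sketch (illustrative only, not part of this file): walk
 * all ethdev ports matching a devargs string with the iterator trio
 * above. rte_eth_iterator_next() calls rte_eth_iterator_cleanup() itself
 * once the iteration is exhausted, so an explicit cleanup is only needed
 * when stopping early.
 *
 *	struct rte_dev_iterator iter;
 *	uint16_t port_id;
 *
 *	if (rte_eth_iterator_init(&iter, "class=eth,mac=00:11:22:33:44:55") == 0)
 *		for (port_id = rte_eth_iterator_next(&iter);
 *		     port_id != RTE_MAX_ETHPORTS;
 *		     port_id = rte_eth_iterator_next(&iter))
 *			printf("matched port %u\n", port_id);
 */
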
uint16_t
rte_eth_find_next(uint16_t port_id)
{
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
		port_id++;

	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;

	return port_id;
}

/*
 * Macro to iterate over all valid ports for internal usage.
 * Note: RTE_ETH_FOREACH_DEV is different because it filters out owned ports.
 */
#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
	for (port_id = rte_eth_find_next(0); \
	     port_id < RTE_MAX_ETHPORTS; \
	     port_id = rte_eth_find_next(port_id + 1))

uint16_t
rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].device != parent)
		port_id = rte_eth_find_next(port_id + 1);

	return port_id;
}

uint16_t
rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
	return rte_eth_find_next_of(port_id,
			rte_eth_devices[ref_port_id].device);
}

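/*
 * Illustrative sketch (caller-side code, not part of this file): walk
 * every port that shares the same underlying rte_device as port 0, e.g.
 * representors exposed by the same PCI device. handle_port() is a
 * hypothetical application callback.
 *
 *	uint16_t sib;
 *
 *	for (sib = rte_eth_find_next_sibling(0, 0);
 *	     sib < RTE_MAX_ETHPORTS;
 *	     sib = rte_eth_find_next_sibling(sib + 1, 0))
 *		handle_port(sib);
 */
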
static void
rte_eth_dev_shared_data_prepare(void)
{
	const unsigned flags = 0;
	const struct rte_memzone *mz;

	rte_spinlock_lock(&rte_eth_shared_data_lock);

	if (rte_eth_dev_shared_data == NULL) {
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			/* Allocate port data and ownership shared memory. */
			mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
					sizeof(*rte_eth_dev_shared_data),
					rte_socket_id(), flags);
		} else
			mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
		if (mz == NULL)
			rte_panic("Cannot allocate ethdev shared data\n");

		rte_eth_dev_shared_data = mz->addr;
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			rte_eth_dev_shared_data->next_owner_id =
					RTE_ETH_DEV_NO_OWNER + 1;
			rte_spinlock_init(&rte_eth_dev_shared_data->ownership_lock);
			memset(rte_eth_dev_shared_data->data, 0,
			       sizeof(rte_eth_dev_shared_data->data));
		}
	}

	rte_spinlock_unlock(&rte_eth_shared_data_lock);
}

static bool
is_allocated(const struct rte_eth_dev *ethdev)
{
	return ethdev->data->name[0] != '\0';
}

static struct rte_eth_dev *
_rte_eth_dev_allocated(const char *name)
{
	unsigned i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (rte_eth_devices[i].data != NULL &&
		    strcmp(rte_eth_devices[i].data->name, name) == 0)
			return &rte_eth_devices[i];
	}
	return NULL;
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
	struct rte_eth_dev *ethdev;

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	ethdev = _rte_eth_dev_allocated(name);

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

	return ethdev;
}

static uint16_t
rte_eth_dev_find_free_port(void)
{
	unsigned i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		/* Using shared name field to find a free port. */
		if (rte_eth_dev_shared_data->data[i].name[0] == '\0') {
			RTE_ASSERT(rte_eth_devices[i].state ==
				   RTE_ETH_DEV_UNUSED);
			return i;
		}
	}
	return RTE_MAX_ETHPORTS;
}

static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
{
	struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

	eth_dev->data = &rte_eth_dev_shared_data->data[port_id];

	return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
	uint16_t port_id;
	struct rte_eth_dev *eth_dev = NULL;
	size_t name_len;

	name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
	if (name_len == 0) {
		RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n");
		return NULL;
	}

	if (name_len >= RTE_ETH_NAME_MAX_LEN) {
		RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n");
		return NULL;
	}

	rte_eth_dev_shared_data_prepare();

	/* Synchronize port creation between primary and secondary threads. */
	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	if (_rte_eth_dev_allocated(name) != NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Ethernet device with name %s already allocated\n",
			name);
		goto unlock;
	}

	port_id = rte_eth_dev_find_free_port();
	if (port_id == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG(ERR,
			"Reached maximum number of Ethernet ports\n");
		goto unlock;
	}

	eth_dev = eth_dev_get(port_id);
	strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
	eth_dev->data->port_id = port_id;
	eth_dev->data->mtu = RTE_ETHER_MTU;
	pthread_mutex_init(&eth_dev->data->flow_ops_mutex, NULL);

unlock:
	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

	return eth_dev;
}

/*
 * Attach to a port already registered by the primary process, which
 * ensures that the same device gets the same port id in both the
 * primary and secondary processes.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
	uint16_t i;
	struct rte_eth_dev *eth_dev = NULL;

	rte_eth_dev_shared_data_prepare();

	/* Synchronize port attachment to primary port creation and release. */
	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (strcmp(rte_eth_dev_shared_data->data[i].name, name) == 0)
			break;
	}
	if (i == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG(ERR,
			"Device %s is not driven by the primary process\n",
			name);
	} else {
		eth_dev = eth_dev_get(i);
		RTE_ASSERT(eth_dev->data->port_id == i);
	}

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
	return eth_dev;
}

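/*
 * Probe-path sketch (illustrative, not part of this file): a PMD
 * typically allocates the port in the primary process and attaches by
 * name in a secondary one, so both see the same port id.
 *
 *	struct rte_eth_dev *eth_dev;
 *
 *	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
 *		eth_dev = rte_eth_dev_allocate(name);
 *	else
 *		eth_dev = rte_eth_dev_attach_secondary(name);
 *	if (eth_dev == NULL)
 *		return -ENODEV;
 */
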
int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
	if (eth_dev == NULL)
		return -EINVAL;

	rte_eth_dev_shared_data_prepare();

	if (eth_dev->state != RTE_ETH_DEV_UNUSED)
		rte_eth_dev_callback_process(eth_dev,
				RTE_ETH_EVENT_DESTROY, NULL);

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	eth_dev->state = RTE_ETH_DEV_UNUSED;
	eth_dev->device = NULL;
	eth_dev->process_private = NULL;
	eth_dev->intr_handle = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->tx_pkt_prepare = NULL;
	eth_dev->rx_queue_count = NULL;
	eth_dev->rx_descriptor_done = NULL;
	eth_dev->rx_descriptor_status = NULL;
	eth_dev->tx_descriptor_status = NULL;
	eth_dev->dev_ops = NULL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_free(eth_dev->data->rx_queues);
		rte_free(eth_dev->data->tx_queues);
		rte_free(eth_dev->data->mac_addrs);
		rte_free(eth_dev->data->hash_mac_addrs);
		rte_free(eth_dev->data->dev_private);
		pthread_mutex_destroy(&eth_dev->data->flow_ops_mutex);
		memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
	}

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

	return 0;
}

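/*
 * Teardown sketch (illustrative): a driver's remove path frees its own
 * resources and then releases the port, which fires
 * RTE_ETH_EVENT_DESTROY and, in the primary process, clears the shared
 * port data.
 *
 *	eth_dev = rte_eth_dev_allocated(name);
 *	if (eth_dev == NULL)
 *		return 0;  (port was already released)
 *	(driver-specific cleanup goes here)
 *	return rte_eth_dev_release_port(eth_dev);
 */
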
int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
	if (port_id >= RTE_MAX_ETHPORTS ||
	    (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
		return 0;
	else
		return 1;
}

static int
rte_eth_is_valid_owner_id(uint64_t owner_id)
{
	if (owner_id == RTE_ETH_DEV_NO_OWNER ||
	    rte_eth_dev_shared_data->next_owner_id <= owner_id)
		return 0;
	return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].data->owner.id != owner_id)
		port_id = rte_eth_find_next(port_id + 1);

	return port_id;
}

int
rte_eth_dev_owner_new(uint64_t *owner_id)
{
	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	*owner_id = rte_eth_dev_shared_data->next_owner_id++;

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
	return 0;
}

static int
_rte_eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
		       const struct rte_eth_dev_owner *new_owner)
{
	struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
	struct rte_eth_dev_owner *port_owner;

	if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (!rte_eth_is_valid_owner_id(new_owner->id) &&
	    !rte_eth_is_valid_owner_id(old_owner_id)) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
		       old_owner_id, new_owner->id);
		return -EINVAL;
	}

	port_owner = &rte_eth_devices[port_id].data->owner;
	if (port_owner->id != old_owner_id) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
			port_id, port_owner->name, port_owner->id);
		return -EPERM;
	}

	/* Cannot truncate (same structure) */
	strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);

	port_owner->id = new_owner->id;

	RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
		port_id, new_owner->name, new_owner->id);

	return 0;
}

int
rte_eth_dev_owner_set(const uint16_t port_id,
		      const struct rte_eth_dev_owner *owner)
{
	int ret;

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	ret = _rte_eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
	return ret;
}

int
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
	const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
			{.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
	int ret;

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	ret = _rte_eth_dev_owner_set(port_id, owner_id, &new_owner);

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
	return ret;
}

int
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
	uint16_t port_id;
	int ret = 0;

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	if (rte_eth_is_valid_owner_id(owner_id)) {
		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
			if (rte_eth_devices[port_id].data->owner.id == owner_id)
				memset(&rte_eth_devices[port_id].data->owner, 0,
				       sizeof(struct rte_eth_dev_owner));
		RTE_ETHDEV_LOG(NOTICE,
			"All port owners owned by %016"PRIx64" identifier have been removed\n",
			owner_id);
	} else {
		RTE_ETHDEV_LOG(ERR,
			       "Invalid owner id=%016"PRIx64"\n",
			       owner_id);
		ret = -EINVAL;
	}

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

	return ret;
}

int
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
	int ret = 0;
	struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];

	rte_eth_dev_shared_data_prepare();

	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

	if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
			port_id);
		ret = -ENODEV;
	} else {
		rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
	}

	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
	return ret;
}

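/*
 * Ownership workflow sketch (illustrative; "my_app" is a made-up owner
 * name): claim a port so that RTE_ETH_FOREACH_DEV in other components
 * skips it, then give it back.
 *
 *	struct rte_eth_dev_owner owner = { .name = "my_app" };
 *
 *	if (rte_eth_dev_owner_new(&owner.id) == 0 &&
 *	    rte_eth_dev_owner_set(port_id, &owner) == 0) {
 *		(use the port exclusively)
 *		rte_eth_dev_owner_unset(port_id, owner.id);
 *	}
 */
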
int
rte_eth_dev_socket_id(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
	return rte_eth_devices[port_id].data->numa_node;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
	return rte_eth_devices[port_id].security_ctx;
}

uint16_t
rte_eth_dev_count_avail(void)
{
	uint16_t p;
	uint16_t count;

	count = 0;

	RTE_ETH_FOREACH_DEV(p)
		count++;

	return count;
}

uint16_t
rte_eth_dev_count_total(void)
{
	uint16_t port, count = 0;

	RTE_ETH_FOREACH_VALID_DEV(port)
		count++;

	return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
	char *tmp;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
		return -EINVAL;
	}

	/* Shouldn't check 'rte_eth_devices[i].data' here,
	 * because it might be overwritten by a vdev PMD */
	tmp = rte_eth_dev_shared_data->data[port_id].name;
	strcpy(name, tmp);
	return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
	uint32_t pid;

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
		return -EINVAL;
	}

	RTE_ETH_FOREACH_VALID_DEV(pid)
		if (!strcmp(name, rte_eth_dev_shared_data->data[pid].name)) {
			*port_id = pid;
			return 0;
		}

	return -ENODEV;
}

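/*
 * Round-trip sketch (illustrative): the two lookups above are inverses
 * of each other for any valid port.
 *
 *	char name[RTE_ETH_NAME_MAX_LEN];
 *	uint16_t pid;
 *
 *	if (rte_eth_dev_get_name_by_port(0, name) == 0 &&
 *	    rte_eth_dev_get_port_by_name(name, &pid) == 0)
 *		assert(pid == 0);
 */
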
static int
eth_err(uint16_t port_id, int ret)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return -EIO;
	return ret;
}

static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_rx_queues;
	void **rxq;
	unsigned i;

	if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
				sizeof(dev->data->rx_queues[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->rx_queues == NULL) {
			dev->data->nb_rx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

		rxq = dev->data->rx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->rx_queue_release)(rxq[i]);
		rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (rxq == NULL)
			return -(ENOMEM);
		if (nb_queues > old_nb_queues) {
			uint16_t new_qs = nb_queues - old_nb_queues;

			memset(rxq + old_nb_queues, 0,
				sizeof(rxq[0]) * new_qs);
		}

		dev->data->rx_queues = rxq;

	} else if (dev->data->rx_queues != NULL && nb_queues == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

		rxq = dev->data->rx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->rx_queue_release)(rxq[i]);

		rte_free(dev->data->rx_queues);
		dev->data->rx_queues = NULL;
	}
	dev->data->nb_rx_queues = nb_queues;
	return 0;
}

static int
eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	uint16_t port_id;

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Invalid Rx queue_id=%u of device with port_id=%u\n",
			       rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queues[rx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Queue %u of device with port_id=%u has not been set up\n",
			       rx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

static int
eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	uint16_t port_id;

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Invalid Tx queue_id=%u of device with port_id=%u\n",
			       tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queues[tx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Queue %u of device with port_id=%u has not been set up\n",
			       tx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_start(dev,
							     rx_queue_id));
}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
}

int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
}

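/*
 * Runtime queue toggle sketch (illustrative): a single Rx queue can be
 * stopped and restarted while the port stays up, provided the driver
 * implements rx_queue_stop/rx_queue_start and the queue is not a
 * hairpin queue.
 *
 *	if (rte_eth_dev_rx_queue_stop(port_id, queue_id) == 0) {
 *		(drain or reconfigure the queue here)
 *		rte_eth_dev_rx_queue_start(port_id, queue_id);
 *	}
 */
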
static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_tx_queues;
	void **txq;
	unsigned i;

	if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
						   sizeof(dev->data->tx_queues[0]) * nb_queues,
						   RTE_CACHE_LINE_SIZE);
		if (dev->data->tx_queues == NULL) {
			dev->data->nb_tx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

		txq = dev->data->tx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->tx_queue_release)(txq[i]);
		txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
				  RTE_CACHE_LINE_SIZE);
		if (txq == NULL)
			return -ENOMEM;
		if (nb_queues > old_nb_queues) {
			uint16_t new_qs = nb_queues - old_nb_queues;

			memset(txq + old_nb_queues, 0,
			       sizeof(txq[0]) * new_qs);
		}

		dev->data->tx_queues = txq;

	} else if (dev->data->tx_queues != NULL && nb_queues == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

		txq = dev->data->tx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->tx_queue_release)(txq[i]);

		rte_free(dev->data->tx_queues);
		dev->data->tx_queues = NULL;
	}
	dev->data->nb_tx_queues = nb_queues;
	return 0;
}

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
	switch (speed) {
	case ETH_SPEED_NUM_10M:
		return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
	case ETH_SPEED_NUM_100M:
		return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
	case ETH_SPEED_NUM_1G:
		return ETH_LINK_SPEED_1G;
	case ETH_SPEED_NUM_2_5G:
		return ETH_LINK_SPEED_2_5G;
	case ETH_SPEED_NUM_5G:
		return ETH_LINK_SPEED_5G;
	case ETH_SPEED_NUM_10G:
		return ETH_LINK_SPEED_10G;
	case ETH_SPEED_NUM_20G:
		return ETH_LINK_SPEED_20G;
	case ETH_SPEED_NUM_25G:
		return ETH_LINK_SPEED_25G;
	case ETH_SPEED_NUM_40G:
		return ETH_LINK_SPEED_40G;
	case ETH_SPEED_NUM_50G:
		return ETH_LINK_SPEED_50G;
	case ETH_SPEED_NUM_56G:
		return ETH_LINK_SPEED_56G;
	case ETH_SPEED_NUM_100G:
		return ETH_LINK_SPEED_100G;
	case ETH_SPEED_NUM_200G:
		return ETH_LINK_SPEED_200G;
	default:
		return 0;
	}
}

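/*
 * Example (illustrative; "conf" is a hypothetical struct rte_eth_conf):
 * build a link_speeds mask forcing fixed 25G full duplex.
 * ETH_LINK_SPEED_FIXED and ETH_LINK_FULL_DUPLEX come from rte_ethdev.h.
 *
 *	conf.link_speeds = ETH_LINK_SPEED_FIXED |
 *		rte_eth_speed_bitflag(ETH_SPEED_NUM_25G, ETH_LINK_FULL_DUPLEX);
 */
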
const char *
rte_eth_dev_rx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(rte_rx_offload_names); ++i) {
		if (offload == rte_rx_offload_names[i].offload) {
			name = rte_rx_offload_names[i].name;
			break;
		}
	}

	return name;
}

const char *
rte_eth_dev_tx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(rte_tx_offload_names); ++i) {
		if (offload == rte_tx_offload_names[i].offload) {
			name = rte_tx_offload_names[i].name;
			break;
		}
	}

	return name;
}

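/*
 * Decode sketch (illustrative; "offloads" is a caller-supplied Rx
 * offload mask): print every offload bit set in a mask, peeling off the
 * lowest set bit each round, the same way validate_offloads() below
 * walks a mask.
 *
 *	uint64_t mask = offloads;
 *
 *	while (mask != 0) {
 *		uint64_t bit = UINT64_C(1) << __builtin_ctzll(mask);
 *		printf("%s\n", rte_eth_dev_rx_offload_name(bit));
 *		mask &= ~bit;
 *	}
 */
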
static inline int
check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
		   uint32_t max_rx_pkt_len, uint32_t dev_info_size)
{
	int ret = 0;

	if (dev_info_size == 0) {
		if (config_size != max_rx_pkt_len) {
			RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
				       " %u != %u is not allowed\n",
				       port_id, config_size, max_rx_pkt_len);
			ret = -EINVAL;
		}
	} else if (config_size > dev_info_size) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			       "> max allowed value %u\n", port_id, config_size,
			       dev_info_size);
		ret = -EINVAL;
	} else if (config_size < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			       "< min allowed value %u\n", port_id, config_size,
			       (unsigned int)RTE_ETHER_MIN_LEN);
		ret = -EINVAL;
	}
	return ret;
}

/*
 * Validate offloads that are requested through rte_eth_dev_configure against
 * the offloads successfully set by the Ethernet device.
 *
 * @param port_id
 *   The port identifier of the Ethernet device.
 * @param req_offloads
 *   The offloads that have been requested through `rte_eth_dev_configure`.
 * @param set_offloads
 *   The offloads successfully set by the Ethernet device.
 * @param offload_type
 *   The offload type, i.e. the "Rx" or "Tx" string.
 * @param offload_name
 *   The function that returns the offload name.
 * @return
 *   - (0) if validation is successful.
 *   - (-EINVAL) if a requested offload has been silently disabled.
 */
static int
validate_offloads(uint16_t port_id, uint64_t req_offloads,
		  uint64_t set_offloads, const char *offload_type,
		  const char *(*offload_name)(uint64_t))
{
	uint64_t offloads_diff = req_offloads ^ set_offloads;
	uint64_t offload;
	int ret = 0;

	while (offloads_diff != 0) {
		/* Check if any offload is requested but not enabled. */
		offload = 1ULL << __builtin_ctzll(offloads_diff);
		if (offload & req_offloads) {
			RTE_ETHDEV_LOG(ERR,
				"Port %u failed to enable %s offload %s\n",
				port_id, offload_type, offload_name(offload));
			ret = -EINVAL;
		}

		/* Check if offload couldn't be disabled. */
		if (offload & set_offloads) {
			RTE_ETHDEV_LOG(DEBUG,
				"Port %u %s offload %s is not requested but enabled\n",
				port_id, offload_type, offload_name(offload));
		}

		offloads_diff &= ~offload;
	}

	return ret;
}

int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf orig_conf;
	int diag;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be stopped to allow configuration\n",
			port_id);
		return -EBUSY;
	}

	/* Store original config, as rollback is required on failure */
	memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));

	/*
	 * Copy the dev_conf parameter into the dev structure.
	 * rte_eth_dev_info_get() requires dev_conf, so copy it before the
	 * dev_info get call.
	 */
	if (dev_conf != &dev->data->dev_conf)
		memcpy(&dev->data->dev_conf, dev_conf,
		       sizeof(dev->data->dev_conf));

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		goto rollback;

	/* If the number of queues specified by the application for both Rx
	 * and Tx is zero, use driver preferred values. This cannot be done
	 * individually as it is valid for either Tx or Rx (but not both) to
	 * be zero. If the driver does not provide any preferred values,
	 * fall back on EAL defaults.
	 */
	if (nb_rx_q == 0 && nb_tx_q == 0) {
		nb_rx_q = dev_info.default_rxportconf.nb_queues;
		if (nb_rx_q == 0)
			nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
		nb_tx_q = dev_info.default_txportconf.nb_queues;
		if (nb_tx_q == 0)
			nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
	}

	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of RX queues requested (%u) is greater than max supported (%d)\n",
			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of TX queues requested (%u) is greater than max supported (%d)\n",
			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Check that the numbers of RX and TX queues are not greater
	 * than the maximum number of RX and TX queues supported by the
	 * configured device.
	 */
	if (nb_rx_q > dev_info.max_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
			port_id, nb_rx_q, dev_info.max_rx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
			port_id, nb_tx_q, dev_info.max_tx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check that the device supports requested interrupts */
	if ((dev_conf->intr_conf.lsc == 1) &&
			(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->intr_conf.rmv == 1) &&
			(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * If jumbo frames are enabled, check that the maximum RX packet
	 * length is supported by the configured device.
	 */
	if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) {
			RTE_ETHDEV_LOG(ERR,
				"Ethdev port_id=%u max_rx_pkt_len %u > max valid value %u\n",
				port_id, dev_conf->rxmode.max_rx_pkt_len,
				dev_info.max_rx_pktlen);
			ret = -EINVAL;
			goto rollback;
		} else if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN) {
			RTE_ETHDEV_LOG(ERR,
				"Ethdev port_id=%u max_rx_pkt_len %u < min valid value %u\n",
				port_id, dev_conf->rxmode.max_rx_pkt_len,
				(unsigned int)RTE_ETHER_MIN_LEN);
			ret = -EINVAL;
			goto rollback;
		}
	} else {
		if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN ||
			dev_conf->rxmode.max_rx_pkt_len > RTE_ETHER_MAX_LEN)
			/* Use default value */
			dev->data->dev_conf.rxmode.max_rx_pkt_len =
							RTE_ETHER_MAX_LEN;
	}

	/*
	 * If LRO is enabled, check that the maximum aggregated packet
	 * size is supported by the configured device.
	 */
	if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
		if (dev_conf->rxmode.max_lro_pkt_size == 0)
			dev->data->dev_conf.rxmode.max_lro_pkt_size =
				dev->data->dev_conf.rxmode.max_rx_pkt_len;
		ret = check_lro_pkt_size(port_id,
				dev->data->dev_conf.rxmode.max_lro_pkt_size,
				dev->data->dev_conf.rxmode.max_rx_pkt_len,
				dev_info.max_lro_pkt_size);
		if (ret != 0)
			goto rollback;
	}

	/* Any requested offloading must be within its device capabilities */
	if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
	     dev_conf->rxmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, dev_conf->rxmode.offloads,
			dev_info.rx_offload_capa,
			__func__);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
	     dev_conf->txmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, dev_conf->txmode.offloads,
			dev_info.tx_offload_capa,
			__func__);
		ret = -EINVAL;
		goto rollback;
	}

	dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
		rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);

	/* Check that the device supports the requested RSS hash functions. */
	if ((dev_info.flow_type_rss_offloads |
	     dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
	    dev_info.flow_type_rss_offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
			port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
			dev_info.flow_type_rss_offloads);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
	if (((dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) == 0) &&
	    (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
			port_id,
			rte_eth_dev_rx_offload_name(DEV_RX_OFFLOAD_RSS_HASH));
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Setup new number of RX/TX queues and reconfigure device.
	 */
	diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u rte_eth_dev_rx_queue_config = %d\n",
			port_id, diag);
		ret = diag;
		goto rollback;
	}

	diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u rte_eth_dev_tx_queue_config = %d\n",
			port_id, diag);
		rte_eth_dev_rx_queue_config(dev, 0);
		ret = diag;
		goto rollback;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Initialize Rx profiling if enabled at compilation time. */
	diag = __rte_eth_dev_profile_init(port_id, dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Validate Rx offloads. */
	diag = validate_offloads(port_id,
			dev_conf->rxmode.offloads,
			dev->data->dev_conf.rxmode.offloads, "Rx",
			rte_eth_dev_rx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	/* Validate Tx offloads. */
	diag = validate_offloads(port_id,
			dev_conf->txmode.offloads,
			dev->data->dev_conf.txmode.offloads, "Tx",
			rte_eth_dev_tx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0);
	return 0;
reset_queues:
	rte_eth_dev_rx_queue_config(dev, 0);
	rte_eth_dev_tx_queue_config(dev, 0);
rollback:
	memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));

	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret);
	return ret;
}

1555 void
1556 rte_eth_dev_internal_reset(struct rte_eth_dev *dev)
1557 {
1558         if (dev->data->dev_started) {
1559                 RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
1560                         dev->data->port_id);
1561                 return;
1562         }
1563
1564         rte_eth_dev_rx_queue_config(dev, 0);
1565         rte_eth_dev_tx_queue_config(dev, 0);
1566
1567         memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
1568 }
1569
1570 static void
1571 rte_eth_dev_mac_restore(struct rte_eth_dev *dev,
1572                         struct rte_eth_dev_info *dev_info)
1573 {
1574         struct rte_ether_addr *addr;
1575         uint16_t i;
1576         uint32_t pool = 0;
1577         uint64_t pool_mask;
1578
1579         /* replay MAC address configuration including default MAC */
1580         addr = &dev->data->mac_addrs[0];
1581         if (*dev->dev_ops->mac_addr_set != NULL)
1582                 (*dev->dev_ops->mac_addr_set)(dev, addr);
1583         else if (*dev->dev_ops->mac_addr_add != NULL)
1584                 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);
1585
1586         if (*dev->dev_ops->mac_addr_add != NULL) {
1587                 for (i = 1; i < dev_info->max_mac_addrs; i++) {
1588                         addr = &dev->data->mac_addrs[i];
1589
1590                         /* skip zero address */
1591                         if (rte_is_zero_ether_addr(addr))
1592                                 continue;
1593
1594                         pool = 0;
1595                         pool_mask = dev->data->mac_pool_sel[i];
1596
1597                         do {
1598                                 if (pool_mask & 1ULL)
1599                                         (*dev->dev_ops->mac_addr_add)(dev,
1600                                                 addr, i, pool);
1601                                 pool_mask >>= 1;
1602                                 pool++;
1603                         } while (pool_mask);
1604                 }
1605         }
1606 }
1607
1608 static int
1609 rte_eth_dev_config_restore(struct rte_eth_dev *dev,
1610                            struct rte_eth_dev_info *dev_info, uint16_t port_id)
1611 {
1612         int ret;
1613
1614         if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
1615                 rte_eth_dev_mac_restore(dev, dev_info);
1616
1617         /* replay promiscuous configuration */
1618         /*
1619          * Use callbacks directly since port_id has already been checked
1620          * and we would like to bypass the same-value check in the API.
1621          */
1622         if (rte_eth_promiscuous_get(port_id) == 1 &&
1623             *dev->dev_ops->promiscuous_enable != NULL) {
1624                 ret = eth_err(port_id,
1625                               (*dev->dev_ops->promiscuous_enable)(dev));
1626                 if (ret != 0 && ret != -ENOTSUP) {
1627                         RTE_ETHDEV_LOG(ERR,
1628                                 "Failed to enable promiscuous mode for device (port %u): %s\n",
1629                                 port_id, rte_strerror(-ret));
1630                         return ret;
1631                 }
1632         } else if (rte_eth_promiscuous_get(port_id) == 0 &&
1633                    *dev->dev_ops->promiscuous_disable != NULL) {
1634                 ret = eth_err(port_id,
1635                               (*dev->dev_ops->promiscuous_disable)(dev));
1636                 if (ret != 0 && ret != -ENOTSUP) {
1637                         RTE_ETHDEV_LOG(ERR,
1638                                 "Failed to disable promiscuous mode for device (port %u): %s\n",
1639                                 port_id, rte_strerror(-ret));
1640                         return ret;
1641                 }
1642         }
1643
1644         /* replay all multicast configuration */
1645         /*
1646          * Use callbacks directly since port_id has already been checked
1647          * and we would like to bypass the same-value check in the API.
1648          */
1649         if (rte_eth_allmulticast_get(port_id) == 1 &&
1650             *dev->dev_ops->allmulticast_enable != NULL) {
1651                 ret = eth_err(port_id,
1652                               (*dev->dev_ops->allmulticast_enable)(dev));
1653                 if (ret != 0 && ret != -ENOTSUP) {
1654                         RTE_ETHDEV_LOG(ERR,
1655                                 "Failed to enable allmulticast mode for device (port %u): %s\n",
1656                                 port_id, rte_strerror(-ret));
1657                         return ret;
1658                 }
1659         } else if (rte_eth_allmulticast_get(port_id) == 0 &&
1660                    *dev->dev_ops->allmulticast_disable != NULL) {
1661                 ret = eth_err(port_id,
1662                               (*dev->dev_ops->allmulticast_disable)(dev));
1663                 if (ret != 0 && ret != -ENOTSUP) {
1664                         RTE_ETHDEV_LOG(ERR,
1665                                 "Failed to disable allmulticast mode for device (port %u): %s\n",
1666                                 port_id, rte_strerror(-ret));
1667                         return ret;
1668                 }
1669         }
1670
1671         return 0;
1672 }
1673
1674 int
1675 rte_eth_dev_start(uint16_t port_id)
1676 {
1677         struct rte_eth_dev *dev;
1678         struct rte_eth_dev_info dev_info;
1679         int diag;
1680         int ret;
1681
1682         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1683
1684         dev = &rte_eth_devices[port_id];
1685
1686         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1687
1688         if (dev->data->dev_started != 0) {
1689                 RTE_ETHDEV_LOG(INFO,
1690                         "Device with port_id=%"PRIu16" already started\n",
1691                         port_id);
1692                 return 0;
1693         }
1694
1695         ret = rte_eth_dev_info_get(port_id, &dev_info);
1696         if (ret != 0)
1697                 return ret;
1698
1699         /* Let's restore MAC now if device does not support live change */
1700         if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
1701                 rte_eth_dev_mac_restore(dev, &dev_info);
1702
1703         diag = (*dev->dev_ops->dev_start)(dev);
1704         if (diag == 0)
1705                 dev->data->dev_started = 1;
1706         else
1707                 return eth_err(port_id, diag);
1708
1709         ret = rte_eth_dev_config_restore(dev, &dev_info, port_id);
1710         if (ret != 0) {
1711                 RTE_ETHDEV_LOG(ERR,
1712                         "Error during restoring configuration for device (port %u): %s\n",
1713                         port_id, rte_strerror(-ret));
1714                 rte_eth_dev_stop(port_id);
1715                 return ret;
1716         }
1717
1718         if (dev->data->dev_conf.intr_conf.lsc == 0) {
1719                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
1720                 (*dev->dev_ops->link_update)(dev, 0);
1721         }
1722
1723         rte_ethdev_trace_start(port_id);
1724         return 0;
1725 }
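/*
 * Illustrative sketch, not part of this file: minimal application-side use
 * of rte_eth_dev_start() with error reporting. Starting an already-started
 * port is harmless here; it logs an INFO message and returns 0.
 */
static int
example_start_port(uint16_t port_id)
{
	int ret = rte_eth_dev_start(port_id);

	if (ret != 0)
		printf("Cannot start port %u: %s\n",
		       port_id, rte_strerror(-ret));
	return ret;
}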
1726
1727 void
1728 rte_eth_dev_stop(uint16_t port_id)
1729 {
1730         struct rte_eth_dev *dev;
1731
1732         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1733         dev = &rte_eth_devices[port_id];
1734
1735         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1736
1737         if (dev->data->dev_started == 0) {
1738                 RTE_ETHDEV_LOG(INFO,
1739                         "Device with port_id=%"PRIu16" already stopped\n",
1740                         port_id);
1741                 return;
1742         }
1743
1744         dev->data->dev_started = 0;
1745         (*dev->dev_ops->dev_stop)(dev);
1746         rte_ethdev_trace_stop(port_id);
1747 }
1748
1749 int
1750 rte_eth_dev_set_link_up(uint16_t port_id)
1751 {
1752         struct rte_eth_dev *dev;
1753
1754         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1755
1756         dev = &rte_eth_devices[port_id];
1757
1758         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1759         return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
1760 }
1761
1762 int
1763 rte_eth_dev_set_link_down(uint16_t port_id)
1764 {
1765         struct rte_eth_dev *dev;
1766
1767         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1768
1769         dev = &rte_eth_devices[port_id];
1770
1771         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1772         return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
1773 }
1774
1775 int
1776 rte_eth_dev_close(uint16_t port_id)
1777 {
1778         struct rte_eth_dev *dev;
1779         int firsterr, binerr;
1780         int *lasterr = &firsterr;
1781
1782         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1783         dev = &rte_eth_devices[port_id];
1784
1785         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
1786         *lasterr = (*dev->dev_ops->dev_close)(dev);
1787         if (*lasterr != 0)
1788                 lasterr = &binerr;
1789
1790         rte_ethdev_trace_close(port_id);
1791         *lasterr = rte_eth_dev_release_port(dev);
1792
1793         return eth_err(port_id, firsterr);
1794 }
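/*
 * Illustrative sketch, not part of this file: with dev_close now returning
 * an error code, callers should check the result of rte_eth_dev_close().
 * The function reports the first error encountered, whether from the
 * driver's close operation or from releasing the port. "example_shutdown"
 * is a hypothetical application helper; note that rte_eth_dev_stop() still
 * returns void in this revision.
 */
static void
example_shutdown(uint16_t port_id)
{
	int ret;

	rte_eth_dev_stop(port_id);
	ret = rte_eth_dev_close(port_id);
	if (ret != 0)
		printf("Failed to close port %u: %s\n",
		       port_id, rte_strerror(-ret));
}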
1795
1796 int
1797 rte_eth_dev_reset(uint16_t port_id)
1798 {
1799         struct rte_eth_dev *dev;
1800         int ret;
1801
1802         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1803         dev = &rte_eth_devices[port_id];
1804
1805         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
1806
1807         rte_eth_dev_stop(port_id);
1808         ret = dev->dev_ops->dev_reset(dev);
1809
1810         return eth_err(port_id, ret);
1811 }
1812
1813 int
1814 rte_eth_dev_is_removed(uint16_t port_id)
1815 {
1816         struct rte_eth_dev *dev;
1817         int ret;
1818
1819         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
1820
1821         dev = &rte_eth_devices[port_id];
1822
1823         if (dev->state == RTE_ETH_DEV_REMOVED)
1824                 return 1;
1825
1826         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);
1827
1828         ret = dev->dev_ops->is_removed(dev);
1829         if (ret != 0)
1830                 /* Device is physically removed. */
1831                 dev->state = RTE_ETH_DEV_REMOVED;
1832
1833         return ret;
1834 }
1835
1836 static int
1837 rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg,
1838                              uint16_t n_seg, uint32_t *mbp_buf_size,
1839                              const struct rte_eth_dev_info *dev_info)
1840 {
1841         const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa;
1842         struct rte_mempool *mp_first;
1843         uint32_t offset_mask;
1844         uint16_t seg_idx;
1845
1846         if (n_seg > seg_capa->max_nseg) {
1847                 RTE_ETHDEV_LOG(ERR,
1848                                "Requested Rx segments %u exceed supported %u\n",
1849                                n_seg, seg_capa->max_nseg);
1850                 return -EINVAL;
1851         }
1852         /*
1853          * Check the sizes and offsets against buffer sizes
1854          * for each segment specified in extended configuration.
1855          */
1856         mp_first = rx_seg[0].mp;
1857         offset_mask = (1u << seg_capa->offset_align_log2) - 1;
1858         for (seg_idx = 0; seg_idx < n_seg; seg_idx++) {
1859                 struct rte_mempool *mpl = rx_seg[seg_idx].mp;
1860                 uint32_t length = rx_seg[seg_idx].length;
1861                 uint32_t offset = rx_seg[seg_idx].offset;
1862
1863                 if (mpl == NULL) {
1864                         RTE_ETHDEV_LOG(ERR, "Rx segment mempool pointer is NULL\n");
1865                         return -EINVAL;
1866                 }
1867                 if (seg_idx != 0 && mp_first != mpl &&
1868                     seg_capa->multi_pools == 0) {
1869                         RTE_ETHDEV_LOG(ERR, "Receiving to multiple pools is not supported\n");
1870                         return -ENOTSUP;
1871                 }
1872                 if (offset != 0) {
1873                         if (seg_capa->offset_allowed == 0) {
1874                                 RTE_ETHDEV_LOG(ERR, "Rx segmentation with offset is not supported\n");
1875                                 return -ENOTSUP;
1876                         }
1877                         if (offset & offset_mask) {
1878                                 RTE_ETHDEV_LOG(ERR, "Rx segment offset %u is not aligned to 2^%u bytes\n",
1879                                                offset,
1880                                                seg_capa->offset_align_log2);
1881                                 return -EINVAL;
1882                         }
1883                 }
1884                 if (mpl->private_data_size <
1885                         sizeof(struct rte_pktmbuf_pool_private)) {
1886                         RTE_ETHDEV_LOG(ERR,
1887                                        "%s private_data_size %u < %u\n",
1888                                        mpl->name, mpl->private_data_size,
1889                                        (unsigned int)sizeof
1890                                         (struct rte_pktmbuf_pool_private));
1891                         return -ENOSPC;
1892                 }
1893                 offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM;
1894                 *mbp_buf_size = rte_pktmbuf_data_room_size(mpl);
1895                 length = length != 0 ? length : *mbp_buf_size;
1896                 if (*mbp_buf_size < length + offset) {
1897                         RTE_ETHDEV_LOG(ERR,
1898                                        "%s mbuf_data_room_size %u < %u (segment length=%u + segment offset=%u)\n",
1899                                        mpl->name, *mbp_buf_size,
1900                                        length + offset, length, offset);
1901                         return -EINVAL;
1902                 }
1903         }
1904         return 0;
1905 }
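/*
 * Illustrative sketch, not part of this file: an Rx configuration that
 * passes the split checks above, placing a fixed-size header segment and
 * the remaining payload into two different mempools. "hdr_mp" and "pay_mp"
 * are hypothetical pools created elsewhere with rte_pktmbuf_pool_create();
 * the PMD must advertise RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT and, for two
 * pools, the multi_pools capability.
 */
static int
example_split_rxq(uint16_t port_id, uint16_t queue_id,
		  struct rte_mempool *hdr_mp, struct rte_mempool *pay_mp)
{
	struct rte_eth_rxseg_split segs[2] = {
		{ .mp = hdr_mp, .length = 64, .offset = 0 },
		{ .mp = pay_mp, .length = 0 /* rest of the buffer */ },
	};
	struct rte_eth_rxconf rxconf = {
		.offloads = RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT,
		.rx_seg = (union rte_eth_rxseg *)segs,
		.rx_nseg = RTE_DIM(segs),
	};

	/* A NULL mempool selects the extended multi-segment path above. */
	return rte_eth_rx_queue_setup(port_id, queue_id, 512,
				      rte_eth_dev_socket_id(port_id),
				      &rxconf, NULL);
}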
1906
1907 int
1908 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1909                        uint16_t nb_rx_desc, unsigned int socket_id,
1910                        const struct rte_eth_rxconf *rx_conf,
1911                        struct rte_mempool *mp)
1912 {
1913         int ret;
1914         uint32_t mbp_buf_size;
1915         struct rte_eth_dev *dev;
1916         struct rte_eth_dev_info dev_info;
1917         struct rte_eth_rxconf local_conf;
1918         void **rxq;
1919
1920         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1921
1922         dev = &rte_eth_devices[port_id];
1923         if (rx_queue_id >= dev->data->nb_rx_queues) {
1924                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
1925                 return -EINVAL;
1926         }
1927
1928         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
1929
1930         ret = rte_eth_dev_info_get(port_id, &dev_info);
1931         if (ret != 0)
1932                 return ret;
1933
1934         if (mp != NULL) {
1935                 /* Single pool configuration check. */
1936                 if (rx_conf != NULL && rx_conf->rx_nseg != 0) {
1937                         RTE_ETHDEV_LOG(ERR,
1938                                        "Ambiguous segment configuration\n");
1939                         return -EINVAL;
1940                 }
1941                 /*
1942                  * Check the size of the mbuf data buffer, this value
1943                  * must be provided in the private data of the memory pool.
1944                  * First check that the memory pool(s) has a valid private data.
1945                  */
1946                 if (mp->private_data_size <
1947                                 sizeof(struct rte_pktmbuf_pool_private)) {
1948                         RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n",
1949                                 mp->name, mp->private_data_size,
1950                                 (unsigned int)
1951                                 sizeof(struct rte_pktmbuf_pool_private));
1952                         return -ENOSPC;
1953                 }
1954                 mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1955                 if (mbp_buf_size < dev_info.min_rx_bufsize +
1956                                    RTE_PKTMBUF_HEADROOM) {
1957                         RTE_ETHDEV_LOG(ERR,
1958                                        "%s mbuf_data_room_size %u < %u (RTE_PKTMBUF_HEADROOM=%u + min_rx_bufsize(dev)=%u)\n",
1959                                        mp->name, mbp_buf_size,
1960                                        RTE_PKTMBUF_HEADROOM +
1961                                        dev_info.min_rx_bufsize,
1962                                        RTE_PKTMBUF_HEADROOM,
1963                                        dev_info.min_rx_bufsize);
1964                         return -EINVAL;
1965                 }
1966         } else {
1967                 /* Extended multi-segment configuration check. */
1968                 if (rx_conf == NULL || rx_conf->rx_seg == NULL ||
1969                     rx_conf->rx_nseg == 0) {
1970                         RTE_ETHDEV_LOG(ERR,
1971                                        "Memory pool is null and no extended configuration provided\n");
1972                         return -EINVAL;
1973                 }
1974                 const struct rte_eth_rxseg_split *rx_seg =
1975                         (const struct rte_eth_rxseg_split *)rx_conf->rx_seg;
1976                 uint16_t n_seg = rx_conf->rx_nseg;
1977                 if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
1978                         ret = rte_eth_rx_queue_check_split(rx_seg, n_seg,
1979                                                            &mbp_buf_size,
1980                                                            &dev_info);
1981                         if (ret != 0)
1982                                 return ret;
1983                 } else {
1984                         RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n");
1985                         return -EINVAL;
1986                 }
1987         }
1988
1989         /* Use default specified by driver, if nb_rx_desc is zero */
1990         if (nb_rx_desc == 0) {
1991                 nb_rx_desc = dev_info.default_rxportconf.ring_size;
1992                 /* If driver default is also zero, fall back on EAL default */
1993                 if (nb_rx_desc == 0)
1994                         nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
1995         }
1996
1997         if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
1998                         nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
1999                         nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
2000
2001                 RTE_ETHDEV_LOG(ERR,
2002                         "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
2003                         nb_rx_desc, dev_info.rx_desc_lim.nb_max,
2004                         dev_info.rx_desc_lim.nb_min,
2005                         dev_info.rx_desc_lim.nb_align);
2006                 return -EINVAL;
2007         }
2008
2009         if (dev->data->dev_started &&
2010                 !(dev_info.dev_capa &
2011                         RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
2012                 return -EBUSY;
2013
2014         if (dev->data->dev_started &&
2015                 (dev->data->rx_queue_state[rx_queue_id] !=
2016                         RTE_ETH_QUEUE_STATE_STOPPED))
2017                 return -EBUSY;
2018
2019         rxq = dev->data->rx_queues;
2020         if (rxq[rx_queue_id]) {
2021                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
2022                                         -ENOTSUP);
2023                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
2024                 rxq[rx_queue_id] = NULL;
2025         }
2026
2027         if (rx_conf == NULL)
2028                 rx_conf = &dev_info.default_rxconf;
2029
2030         local_conf = *rx_conf;
2031
2032         /*
2033          * If an offload has already been enabled in
2034          * rte_eth_dev_configure(), it has been enabled on all queues,
2035          * so there is no need to enable it on this queue again.
2036          * The local_conf.offloads input to the underlying PMD only
2037          * carries those offloads which are enabled on this queue only
2038          * and not enabled on all queues.
2039          */
2040         local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
2041
2042         /*
2043          * Offloads newly added for this queue are those not enabled in
2044          * rte_eth_dev_configure() and they must be of a per-queue type.
2045          * A pure per-port offload can't be enabled on a queue while
2046          * disabled on another queue. Nor can a pure per-port offload
2047          * be newly enabled on a queue if it hasn't been enabled
2048          * in rte_eth_dev_configure().
2049          */
2050         if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
2051              local_conf.offloads) {
2052                 RTE_ETHDEV_LOG(ERR,
2053                         "Ethdev port_id=%d rx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
2054                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2055                         port_id, rx_queue_id, local_conf.offloads,
2056                         dev_info.rx_queue_offload_capa,
2057                         __func__);
2058                 return -EINVAL;
2059         }
2060
2061         /*
2062          * If LRO is enabled, check that the maximum aggregated packet
2063          * size is supported by the configured device.
2064          */
2065         if (local_conf.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
2066                 if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0)
2067                         dev->data->dev_conf.rxmode.max_lro_pkt_size =
2068                                 dev->data->dev_conf.rxmode.max_rx_pkt_len;
2069                 ret = check_lro_pkt_size(port_id,
2070                                 dev->data->dev_conf.rxmode.max_lro_pkt_size,
2071                                 dev->data->dev_conf.rxmode.max_rx_pkt_len,
2072                                 dev_info.max_lro_pkt_size);
2073                 if (ret != 0)
2074                         return ret;
2075         }
2076
2077         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
2078                                               socket_id, &local_conf, mp);
2079         if (!ret) {
2080                 if (!dev->data->min_rx_buf_size ||
2081                     dev->data->min_rx_buf_size > mbp_buf_size)
2082                         dev->data->min_rx_buf_size = mbp_buf_size;
2083         }
2084
2085         rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp,
2086                 rx_conf, ret);
2087         return eth_err(port_id, ret);
2088 }
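/*
 * Illustrative sketch, not part of this file: the conventional single-pool
 * path through rte_eth_rx_queue_setup(). Zero descriptors and a NULL
 * rx_conf select the driver defaults validated above; "mb_pool" is a
 * hypothetical mempool created elsewhere.
 */
static int
example_default_rxq(uint16_t port_id, uint16_t queue_id,
		    struct rte_mempool *mb_pool)
{
	return rte_eth_rx_queue_setup(port_id, queue_id,
				      0 /* driver default ring size */,
				      rte_eth_dev_socket_id(port_id),
				      NULL /* default rx_conf */, mb_pool);
}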
2089
2090 int
2091 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2092                                uint16_t nb_rx_desc,
2093                                const struct rte_eth_hairpin_conf *conf)
2094 {
2095         int ret;
2096         struct rte_eth_dev *dev;
2097         struct rte_eth_hairpin_cap cap;
2098         void **rxq;
2099         int i;
2100         int count;
2101
2102         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2103
2104         dev = &rte_eth_devices[port_id];
2105         if (rx_queue_id >= dev->data->nb_rx_queues) {
2106                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
2107                 return -EINVAL;
2108         }
2109         ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2110         if (ret != 0)
2111                 return ret;
2112         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_hairpin_queue_setup,
2113                                 -ENOTSUP);
2114         /* if nb_rx_desc is zero use max number of desc from the driver. */
2115         if (nb_rx_desc == 0)
2116                 nb_rx_desc = cap.max_nb_desc;
2117         if (nb_rx_desc > cap.max_nb_desc) {
2118                 RTE_ETHDEV_LOG(ERR,
2119                         "Invalid value for nb_rx_desc(=%hu), should be: <= %hu\n",
2120                         nb_rx_desc, cap.max_nb_desc);
2121                 return -EINVAL;
2122         }
2123         if (conf->peer_count > cap.max_rx_2_tx) {
2124                 RTE_ETHDEV_LOG(ERR,
2125                         "Invalid value for number of peers for Rx queue(=%u), should be: <= %hu\n",
2126                         conf->peer_count, cap.max_rx_2_tx);
2127                 return -EINVAL;
2128         }
2129         if (conf->peer_count == 0) {
2130                 RTE_ETHDEV_LOG(ERR,
2131                         "Invalid value for number of peers for Rx queue(=%u), should be: > 0\n",
2132                         conf->peer_count);
2133                 return -EINVAL;
2134         }
2135         for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
2136              cap.max_nb_queues != UINT16_MAX; i++) {
2137                 if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
2138                         count++;
2139         }
2140         if (count > cap.max_nb_queues) {
2141                 RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues, max is %d\n",
2142                                cap.max_nb_queues);
2143                 return -EINVAL;
2144         }
2145         if (dev->data->dev_started)
2146                 return -EBUSY;
2147         rxq = dev->data->rx_queues;
2148         if (rxq[rx_queue_id] != NULL) {
2149                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
2150                                         -ENOTSUP);
2151                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
2152                 rxq[rx_queue_id] = NULL;
2153         }
2154         ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
2155                                                       nb_rx_desc, conf);
2156         if (ret == 0)
2157                 dev->data->rx_queue_state[rx_queue_id] =
2158                         RTE_ETH_QUEUE_STATE_HAIRPIN;
2159         return eth_err(port_id, ret);
2160 }
2161
2162 int
2163 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2164                        uint16_t nb_tx_desc, unsigned int socket_id,
2165                        const struct rte_eth_txconf *tx_conf)
2166 {
2167         struct rte_eth_dev *dev;
2168         struct rte_eth_dev_info dev_info;
2169         struct rte_eth_txconf local_conf;
2170         void **txq;
2171         int ret;
2172
2173         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2174
2175         dev = &rte_eth_devices[port_id];
2176         if (tx_queue_id >= dev->data->nb_tx_queues) {
2177                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
2178                 return -EINVAL;
2179         }
2180
2181         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
2182
2183         ret = rte_eth_dev_info_get(port_id, &dev_info);
2184         if (ret != 0)
2185                 return ret;
2186
2187         /* Use default specified by driver, if nb_tx_desc is zero */
2188         if (nb_tx_desc == 0) {
2189                 nb_tx_desc = dev_info.default_txportconf.ring_size;
2190                 /* If driver default is zero, fall back on EAL default */
2191                 if (nb_tx_desc == 0)
2192                         nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
2193         }
2194         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
2195             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
2196             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
2197                 RTE_ETHDEV_LOG(ERR,
2198                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
2199                         nb_tx_desc, dev_info.tx_desc_lim.nb_max,
2200                         dev_info.tx_desc_lim.nb_min,
2201                         dev_info.tx_desc_lim.nb_align);
2202                 return -EINVAL;
2203         }
2204
2205         if (dev->data->dev_started &&
2206                 !(dev_info.dev_capa &
2207                         RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
2208                 return -EBUSY;
2209
2210         if (dev->data->dev_started &&
2211                 (dev->data->tx_queue_state[tx_queue_id] !=
2212                         RTE_ETH_QUEUE_STATE_STOPPED))
2213                 return -EBUSY;
2214
2215         txq = dev->data->tx_queues;
2216         if (txq[tx_queue_id]) {
2217                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
2218                                         -ENOTSUP);
2219                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
2220                 txq[tx_queue_id] = NULL;
2221         }
2222
2223         if (tx_conf == NULL)
2224                 tx_conf = &dev_info.default_txconf;
2225
2226         local_conf = *tx_conf;
2227
2228         /*
2229          * If an offload has already been enabled in
2230          * rte_eth_dev_configure(), it has been enabled on all queues,
2231          * so there is no need to enable it on this queue again.
2232          * The local_conf.offloads input to the underlying PMD only
2233          * carries those offloads which are enabled on this queue only
2234          * and not enabled on all queues.
2235          */
2236         local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
2237
2238         /*
2239          * Offloads newly added for this queue are those not enabled in
2240          * rte_eth_dev_configure() and they must be of a per-queue type.
2241          * A pure per-port offload can't be enabled on a queue while
2242          * disabled on another queue. Nor can a pure per-port offload
2243          * be newly enabled on a queue if it hasn't been enabled
2244          * in rte_eth_dev_configure().
2245          */
2246         if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
2247              local_conf.offloads) {
2248                 RTE_ETHDEV_LOG(ERR,
2249                         "Ethdev port_id=%d tx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
2250                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2251                         port_id, tx_queue_id, local_conf.offloads,
2252                         dev_info.tx_queue_offload_capa,
2253                         __func__);
2254                 return -EINVAL;
2255         }
2256
2257         rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf);
2258         return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
2259                        tx_queue_id, nb_tx_desc, socket_id, &local_conf));
2260 }
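/*
 * Illustrative sketch, not part of this file: enabling one extra offload on
 * a single Tx queue. Per the checks above, offloads not already enabled
 * port-wide in rte_eth_dev_configure() must appear in the PMD's
 * tx_queue_offload_capa; DEV_TX_OFFLOAD_MBUF_FAST_FREE is used here on the
 * assumption that the PMD reports it as per-queue capable.
 */
static int
example_txq_with_offload(uint16_t port_id, uint16_t queue_id,
			 const struct rte_eth_dev_info *dev_info)
{
	struct rte_eth_txconf txconf = dev_info->default_txconf;

	txconf.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
	return rte_eth_tx_queue_setup(port_id, queue_id,
				      0 /* driver default ring size */,
				      rte_eth_dev_socket_id(port_id),
				      &txconf);
}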
2261
2262 int
2263 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2264                                uint16_t nb_tx_desc,
2265                                const struct rte_eth_hairpin_conf *conf)
2266 {
2267         struct rte_eth_dev *dev;
2268         struct rte_eth_hairpin_cap cap;
2269         void **txq;
2270         int i;
2271         int count;
2272         int ret;
2273
2274         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2275         dev = &rte_eth_devices[port_id];
2276         if (tx_queue_id >= dev->data->nb_tx_queues) {
2277                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
2278                 return -EINVAL;
2279         }
2280         ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2281         if (ret != 0)
2282                 return ret;
2283         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup,
2284                                 -ENOTSUP);
2285         /* if nb_tx_desc is zero use max number of desc from the driver. */
2286         if (nb_tx_desc == 0)
2287                 nb_tx_desc = cap.max_nb_desc;
2288         if (nb_tx_desc > cap.max_nb_desc) {
2289                 RTE_ETHDEV_LOG(ERR,
2290                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu\n",
2291                         nb_tx_desc, cap.max_nb_desc);
2292                 return -EINVAL;
2293         }
2294         if (conf->peer_count > cap.max_tx_2_rx) {
2295                 RTE_ETHDEV_LOG(ERR,
2296                         "Invalid value for number of peers for Tx queue(=%u), should be: <= %hu\n",
2297                         conf->peer_count, cap.max_tx_2_rx);
2298                 return -EINVAL;
2299         }
2300         if (conf->peer_count == 0) {
2301                 RTE_ETHDEV_LOG(ERR,
2302                         "Invalid value for number of peers for Tx queue(=%u), should be: > 0\n",
2303                         conf->peer_count);
2304                 return -EINVAL;
2305         }
2306         for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
2307              cap.max_nb_queues != UINT16_MAX; i++) {
2308                 if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
2309                         count++;
2310         }
2311         if (count > cap.max_nb_queues) {
2312                 RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues, max is %d\n",
2313                                cap.max_nb_queues);
2314                 return -EINVAL;
2315         }
2316         if (dev->data->dev_started)
2317                 return -EBUSY;
2318         txq = dev->data->tx_queues;
2319         if (txq[tx_queue_id] != NULL) {
2320                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
2321                                         -ENOTSUP);
2322                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
2323                 txq[tx_queue_id] = NULL;
2324         }
2325         ret = (*dev->dev_ops->tx_hairpin_queue_setup)
2326                 (dev, tx_queue_id, nb_tx_desc, conf);
2327         if (ret == 0)
2328                 dev->data->tx_queue_state[tx_queue_id] =
2329                         RTE_ETH_QUEUE_STATE_HAIRPIN;
2330         return eth_err(port_id, ret);
2331 }
2332
2333 int
2334 rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
2335 {
2336         struct rte_eth_dev *dev;
2337         int ret;
2338
2339         RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2340         dev = &rte_eth_devices[tx_port];
2341         if (dev->data->dev_started == 0) {
2342                 RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port);
2343                 return -EBUSY;
2344         }
2345
2346         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_bind, -ENOTSUP);
2347         ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port);
2348         if (ret != 0)
2349                 RTE_ETHDEV_LOG(ERR, "Failed to bind hairpin Tx %d"
2350                                " to Rx %d (%d - all ports)\n",
2351                                tx_port, rx_port, RTE_MAX_ETHPORTS);
2352
2353         return ret;
2354 }
2355
2356 int
2357 rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
2358 {
2359         struct rte_eth_dev *dev;
2360         int ret;
2361
2362         RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2363         dev = &rte_eth_devices[tx_port];
2364         if (dev->data->dev_started == 0) {
2365                 RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port);
2366                 return -EBUSY;
2367         }
2368
2369         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_unbind, -ENOTSUP);
2370         ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port);
2371         if (ret != 0)
2372                 RTE_ETHDEV_LOG(ERR, "Failed to unbind hairpin Tx %d"
2373                                " from Rx %d (%d - all ports)\n",
2374                                tx_port, rx_port, RTE_MAX_ETHPORTS);
2375
2376         return ret;
2377 }
2378
2379 int
2380 rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
2381                                size_t len, uint32_t direction)
2382 {
2383         struct rte_eth_dev *dev;
2384         int ret;
2385
2386         if (peer_ports == NULL || len == 0)
2387                 return -EINVAL;
2388
2389         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2390         dev = &rte_eth_devices[port_id];
2391         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_get_peer_ports,
2392                                 -ENOTSUP);
2393
2394         ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports,
2395                                                       len, direction);
2396         if (ret < 0)
2397                 RTE_ETHDEV_LOG(ERR, "Failed to get port %d hairpin peer %s ports\n",
2398                                port_id, direction ? "Rx" : "Tx");
2399
2400         return ret;
2401 }
2402
2403 void
2404 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
2405                 void *userdata __rte_unused)
2406 {
2407         rte_pktmbuf_free_bulk(pkts, unsent);
2408 }
2409
2410 void
2411 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
2412                 void *userdata)
2413 {
2414         uint64_t *count = userdata;
2415
2416         rte_pktmbuf_free_bulk(pkts, unsent);
2417         *count += unsent;
2418 }
2419
2420 int
2421 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
2422                 buffer_tx_error_fn cbfn, void *userdata)
2423 {
2424         buffer->error_callback = cbfn;
2425         buffer->error_userdata = userdata;
2426         return 0;
2427 }
2428
2429 int
2430 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
2431 {
2432         int ret = 0;
2433
2434         if (buffer == NULL)
2435                 return -EINVAL;
2436
2437         buffer->size = size;
2438         if (buffer->error_callback == NULL) {
2439                 ret = rte_eth_tx_buffer_set_err_callback(
2440                         buffer, rte_eth_tx_buffer_drop_callback, NULL);
2441         }
2442
2443         return ret;
2444 }
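/*
 * Illustrative sketch, not part of this file: allocating a Tx buffer for
 * bursts of up to 32 packets and counting drops via the count callback
 * above instead of silently freeing unsent mbufs. "tx_drops" is a
 * hypothetical application-side counter.
 */
static uint64_t tx_drops;

static struct rte_eth_dev_tx_buffer *
example_tx_buffer_create(unsigned int socket_id)
{
	struct rte_eth_dev_tx_buffer *buffer;

	buffer = rte_zmalloc_socket("example_tx_buffer",
				    RTE_ETH_TX_BUFFER_SIZE(32), 0, socket_id);
	if (buffer == NULL)
		return NULL;
	if (rte_eth_tx_buffer_init(buffer, 32) != 0 ||
	    rte_eth_tx_buffer_set_err_callback(buffer,
			rte_eth_tx_buffer_count_callback, &tx_drops) != 0) {
		rte_free(buffer);
		return NULL;
	}
	return buffer;
}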
2445
2446 int
2447 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
2448 {
2449         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2450         int ret;
2451
2452         /* Validate Input Data. Bail if not valid or not supported. */
2453         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2454         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
2455
2456         /* Call driver to free pending mbufs. */
2457         ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
2458                                                free_cnt);
2459         return eth_err(port_id, ret);
2460 }
2461
2462 int
2463 rte_eth_promiscuous_enable(uint16_t port_id)
2464 {
2465         struct rte_eth_dev *dev;
2466         int diag = 0;
2467
2468         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2469         dev = &rte_eth_devices[port_id];
2470
2471         if (dev->data->promiscuous == 1)
2472                 return 0;
2473
2474         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP);
2475
2476         diag = (*dev->dev_ops->promiscuous_enable)(dev);
2477         dev->data->promiscuous = (diag == 0) ? 1 : 0;
2478
2479         return eth_err(port_id, diag);
2480 }
2481
2482 int
2483 rte_eth_promiscuous_disable(uint16_t port_id)
2484 {
2485         struct rte_eth_dev *dev;
2486         int diag = 0;
2487
2488         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2489         dev = &rte_eth_devices[port_id];
2490
2491         if (dev->data->promiscuous == 0)
2492                 return 0;
2493
2494         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP);
2495
2496         dev->data->promiscuous = 0;
2497         diag = (*dev->dev_ops->promiscuous_disable)(dev);
2498         if (diag != 0)
2499                 dev->data->promiscuous = 1;
2500
2501         return eth_err(port_id, diag);
2502 }
2503
2504 int
2505 rte_eth_promiscuous_get(uint16_t port_id)
2506 {
2507         struct rte_eth_dev *dev;
2508
2509         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2510
2511         dev = &rte_eth_devices[port_id];
2512         return dev->data->promiscuous;
2513 }
2514
2515 int
2516 rte_eth_allmulticast_enable(uint16_t port_id)
2517 {
2518         struct rte_eth_dev *dev;
2519         int diag;
2520
2521         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2522         dev = &rte_eth_devices[port_id];
2523
2524         if (dev->data->all_multicast == 1)
2525                 return 0;
2526
2527         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP);
2528         diag = (*dev->dev_ops->allmulticast_enable)(dev);
2529         dev->data->all_multicast = (diag == 0) ? 1 : 0;
2530
2531         return eth_err(port_id, diag);
2532 }
2533
2534 int
2535 rte_eth_allmulticast_disable(uint16_t port_id)
2536 {
2537         struct rte_eth_dev *dev;
2538         int diag;
2539
2540         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2541         dev = &rte_eth_devices[port_id];
2542
2543         if (dev->data->all_multicast == 0)
2544                 return 0;
2545
2546         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP);
2547         dev->data->all_multicast = 0;
2548         diag = (*dev->dev_ops->allmulticast_disable)(dev);
2549         if (diag != 0)
2550                 dev->data->all_multicast = 1;
2551
2552         return eth_err(port_id, diag);
2553 }
2554
2555 int
2556 rte_eth_allmulticast_get(uint16_t port_id)
2557 {
2558         struct rte_eth_dev *dev;
2559
2560         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2561
2562         dev = &rte_eth_devices[port_id];
2563         return dev->data->all_multicast;
2564 }
2565
2566 int
2567 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
2568 {
2569         struct rte_eth_dev *dev;
2570
2571         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2572         dev = &rte_eth_devices[port_id];
2573
2574         if (dev->data->dev_conf.intr_conf.lsc &&
2575             dev->data->dev_started)
2576                 rte_eth_linkstatus_get(dev, eth_link);
2577         else {
2578                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2579                 (*dev->dev_ops->link_update)(dev, 1);
2580                 *eth_link = dev->data->dev_link;
2581         }
2582
2583         return 0;
2584 }
2585
2586 int
2587 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
2588 {
2589         struct rte_eth_dev *dev;
2590
2591         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2592         dev = &rte_eth_devices[port_id];
2593
2594         if (dev->data->dev_conf.intr_conf.lsc &&
2595             dev->data->dev_started)
2596                 rte_eth_linkstatus_get(dev, eth_link);
2597         else {
2598                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2599                 (*dev->dev_ops->link_update)(dev, 0);
2600                 *eth_link = dev->data->dev_link;
2601         }
2602
2603         return 0;
2604 }
2605
2606 const char *
2607 rte_eth_link_speed_to_str(uint32_t link_speed)
2608 {
2609         switch (link_speed) {
2610         case ETH_SPEED_NUM_NONE: return "None";
2611         case ETH_SPEED_NUM_10M:  return "10 Mbps";
2612         case ETH_SPEED_NUM_100M: return "100 Mbps";
2613         case ETH_SPEED_NUM_1G:   return "1 Gbps";
2614         case ETH_SPEED_NUM_2_5G: return "2.5 Gbps";
2615         case ETH_SPEED_NUM_5G:   return "5 Gbps";
2616         case ETH_SPEED_NUM_10G:  return "10 Gbps";
2617         case ETH_SPEED_NUM_20G:  return "20 Gbps";
2618         case ETH_SPEED_NUM_25G:  return "25 Gbps";
2619         case ETH_SPEED_NUM_40G:  return "40 Gbps";
2620         case ETH_SPEED_NUM_50G:  return "50 Gbps";
2621         case ETH_SPEED_NUM_56G:  return "56 Gbps";
2622         case ETH_SPEED_NUM_100G: return "100 Gbps";
2623         case ETH_SPEED_NUM_200G: return "200 Gbps";
2624         case ETH_SPEED_NUM_UNKNOWN: return "Unknown";
2625         default: return "Invalid";
2626         }
2627 }
2628
2629 int
2630 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
2631 {
2632         if (eth_link->link_status == ETH_LINK_DOWN)
2633                 return snprintf(str, len, "Link down");
2634         else
2635                 return snprintf(str, len, "Link up at %s %s %s",
2636                         rte_eth_link_speed_to_str(eth_link->link_speed),
2637                         (eth_link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
2638                         "FDX" : "HDX",
2639                         (eth_link->link_autoneg == ETH_LINK_AUTONEG) ?
2640                         "Autoneg" : "Fixed");
2641 }
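/*
 * Illustrative sketch, not part of this file: formatting the current link
 * state of a port with the helpers above. RTE_ETH_LINK_MAX_STR_LEN is
 * large enough for any string rte_eth_link_to_str() can produce.
 */
static void
example_print_link(uint16_t port_id)
{
	struct rte_eth_link link;
	char text[RTE_ETH_LINK_MAX_STR_LEN];

	if (rte_eth_link_get_nowait(port_id, &link) != 0)
		return;
	rte_eth_link_to_str(text, sizeof(text), &link);
	printf("Port %u: %s\n", port_id, text);
}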
2642
2643 int
2644 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
2645 {
2646         struct rte_eth_dev *dev;
2647
2648         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2649
2650         dev = &rte_eth_devices[port_id];
2651         memset(stats, 0, sizeof(*stats));
2652
2653         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
2654         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
2655         return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
2656 }
2657
2658 int
2659 rte_eth_stats_reset(uint16_t port_id)
2660 {
2661         struct rte_eth_dev *dev;
2662         int ret;
2663
2664         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2665         dev = &rte_eth_devices[port_id];
2666
2667         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
2668         ret = (*dev->dev_ops->stats_reset)(dev);
2669         if (ret != 0)
2670                 return eth_err(port_id, ret);
2671
2672         dev->data->rx_mbuf_alloc_failed = 0;
2673
2674         return 0;
2675 }
2676
2677 static inline int
2678 get_xstats_basic_count(struct rte_eth_dev *dev)
2679 {
2680         uint16_t nb_rxqs, nb_txqs;
2681         int count;
2682
2683         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2684         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2685
2686         count = RTE_NB_STATS;
2687         count += nb_rxqs * RTE_NB_RXQ_STATS;
2688         count += nb_txqs * RTE_NB_TXQ_STATS;
2689
2690         return count;
2691 }
2692
2693 static int
2694 get_xstats_count(uint16_t port_id)
2695 {
2696         struct rte_eth_dev *dev;
2697         int count;
2698
2699         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2700         dev = &rte_eth_devices[port_id];
2701         if (dev->dev_ops->xstats_get_names_by_id != NULL) {
2702                 count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
2703                                 NULL, 0);
2704                 if (count < 0)
2705                         return eth_err(port_id, count);
2706         }
2707         if (dev->dev_ops->xstats_get_names != NULL) {
2708                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
2709                 if (count < 0)
2710                         return eth_err(port_id, count);
2711         } else
2712                 count = 0;
2713
2715         count += get_xstats_basic_count(dev);
2716
2717         return count;
2718 }
2719
2720 int
2721 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
2722                 uint64_t *id)
2723 {
2724         int cnt_xstats, idx_xstat;
2725
2726         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2727
2728         if (!id) {
2729                 RTE_ETHDEV_LOG(ERR, "Id pointer is NULL\n");
2730                 return -ENOMEM;
2731         }
2732
2733         if (!xstat_name) {
2734                 RTE_ETHDEV_LOG(ERR, "xstat_name pointer is NULL\n");
2735                 return -ENOMEM;
2736         }
2737
2738         /* Get count */
2739         cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
2740         if (cnt_xstats < 0) {
2741                 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n");
2742                 return -ENODEV;
2743         }
2744
2745         /* Get id-name lookup table */
2746         struct rte_eth_xstat_name xstats_names[cnt_xstats];
2747
2748         if (cnt_xstats != rte_eth_xstats_get_names_by_id(
2749                         port_id, xstats_names, cnt_xstats, NULL)) {
2750                 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n");
2751                 return -1;
2752         }
2753
2754         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
2755                 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
2756                         *id = idx_xstat;
2757                         return 0;
2758                 }
2759         }
2760
2761         return -EINVAL;
2762 }
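/*
 * Illustrative sketch, not part of this file: reading a single extended
 * statistic by name, combining the id lookup above with
 * rte_eth_xstats_get_by_id().
 */
static int
example_read_xstat(uint16_t port_id, const char *name, uint64_t *value)
{
	uint64_t id;
	int ret;

	ret = rte_eth_xstats_get_id_by_name(port_id, name, &id);
	if (ret != 0)
		return ret;
	/* Anything other than exactly one value read is a failure. */
	return rte_eth_xstats_get_by_id(port_id, &id, value, 1) == 1 ? 0 : -1;
}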
2763
2764 /* retrieve basic stats names */
2765 static int
2766 rte_eth_basic_stats_get_names(struct rte_eth_dev *dev,
2767         struct rte_eth_xstat_name *xstats_names)
2768 {
2769         int cnt_used_entries = 0;
2770         uint32_t idx, id_queue;
2771         uint16_t num_q;
2772
2773         for (idx = 0; idx < RTE_NB_STATS; idx++) {
2774                 strlcpy(xstats_names[cnt_used_entries].name,
2775                         rte_stats_strings[idx].name,
2776                         sizeof(xstats_names[0].name));
2777                 cnt_used_entries++;
2778         }
2779         num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2780         for (id_queue = 0; id_queue < num_q; id_queue++) {
2781                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
2782                         snprintf(xstats_names[cnt_used_entries].name,
2783                                 sizeof(xstats_names[0].name),
2784                                 "rx_q%u_%s",
2785                                 id_queue, rte_rxq_stats_strings[idx].name);
2786                         cnt_used_entries++;
2787                 }
2788
2789         }
2790         num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2791         for (id_queue = 0; id_queue < num_q; id_queue++) {
2792                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
2793                         snprintf(xstats_names[cnt_used_entries].name,
2794                                 sizeof(xstats_names[0].name),
2795                                 "tx_q%u_%s",
2796                                 id_queue, rte_txq_stats_strings[idx].name);
2797                         cnt_used_entries++;
2798                 }
2799         }
2800         return cnt_used_entries;
2801 }
2802
2803 /* retrieve ethdev extended statistics names */
2804 int
2805 rte_eth_xstats_get_names_by_id(uint16_t port_id,
2806         struct rte_eth_xstat_name *xstats_names, unsigned int size,
2807         uint64_t *ids)
2808 {
2809         struct rte_eth_xstat_name *xstats_names_copy;
2810         unsigned int no_basic_stat_requested = 1;
2811         unsigned int no_ext_stat_requested = 1;
2812         unsigned int expected_entries;
2813         unsigned int basic_count;
2814         struct rte_eth_dev *dev;
2815         unsigned int i;
2816         int ret;
2817
2818         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2819         dev = &rte_eth_devices[port_id];
2820
2821         basic_count = get_xstats_basic_count(dev);
2822         ret = get_xstats_count(port_id);
2823         if (ret < 0)
2824                 return ret;
2825         expected_entries = (unsigned int)ret;
2826
2827         /* Return max number of stats if no ids given */
2828         if (!ids) {
2829                 if (!xstats_names)
2830                         return expected_entries;
2831                 else if (size < expected_entries)
2832                         return expected_entries;
2833         }
2834
2835         if (ids && !xstats_names)
2836                 return -EINVAL;
2837
2838         if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
2839                 uint64_t ids_copy[size];
2840
2841                 for (i = 0; i < size; i++) {
2842                         if (ids[i] < basic_count) {
2843                                 no_basic_stat_requested = 0;
2844                                 break;
2845                         }
2846
2847                         /*
2848                          * Convert ids to xstats ids that PMD knows.
2849                          * ids known by user are basic + extended stats.
2850                          */
2851                         ids_copy[i] = ids[i] - basic_count;
2852                 }
2853
2854                 if (no_basic_stat_requested)
2855                         return (*dev->dev_ops->xstats_get_names_by_id)(dev,
2856                                         xstats_names, ids_copy, size);
2857         }
2858
2859         /* Retrieve all stats */
2860         if (!ids) {
2861                 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
2862                                 expected_entries);
2863                 if (num_stats < 0 || num_stats > (int)expected_entries)
2864                         return num_stats;
2865                 else
2866                         return expected_entries;
2867         }
2868
2869         xstats_names_copy = calloc(expected_entries,
2870                 sizeof(struct rte_eth_xstat_name));
2871
2872         if (!xstats_names_copy) {
2873                 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n");
2874                 return -ENOMEM;
2875         }
2876
2877         if (ids) {
2878                 for (i = 0; i < size; i++) {
2879                         if (ids[i] >= basic_count) {
2880                                 no_ext_stat_requested = 0;
2881                                 break;
2882                         }
2883                 }
2884         }
2885
2886         /* Fill xstats_names_copy structure */
2887         if (ids && no_ext_stat_requested) {
2888                 rte_eth_basic_stats_get_names(dev, xstats_names_copy);
2889         } else {
2890                 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
2891                         expected_entries);
2892                 if (ret < 0) {
2893                         free(xstats_names_copy);
2894                         return ret;
2895                 }
2896         }
2897
2898         /* Filter stats */
2899         for (i = 0; i < size; i++) {
2900                 if (ids[i] >= expected_entries) {
2901                         RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
2902                         free(xstats_names_copy);
2903                         return -1;
2904                 }
2905                 xstats_names[i] = xstats_names_copy[ids[i]];
2906         }
2907
2908         free(xstats_names_copy);
2909         return size;
2910 }
2911
2912 int
2913 rte_eth_xstats_get_names(uint16_t port_id,
2914         struct rte_eth_xstat_name *xstats_names,
2915         unsigned int size)
2916 {
2917         struct rte_eth_dev *dev;
2918         int cnt_used_entries;
2919         int cnt_expected_entries;
2920         int cnt_driver_entries;
2921
2922         cnt_expected_entries = get_xstats_count(port_id);
2923         if (xstats_names == NULL || cnt_expected_entries < 0 ||
2924                         (int)size < cnt_expected_entries)
2925                 return cnt_expected_entries;
2926
2927         /* port_id checked in get_xstats_count() */
2928         dev = &rte_eth_devices[port_id];
2929
2930         cnt_used_entries = rte_eth_basic_stats_get_names(
2931                 dev, xstats_names);
2932
2933         if (dev->dev_ops->xstats_get_names != NULL) {
2934                 /* If there are any driver-specific xstats, append them
2935                  * to the end of the list.
2936                  */
2937                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
2938                         dev,
2939                         xstats_names + cnt_used_entries,
2940                         size - cnt_used_entries);
2941                 if (cnt_driver_entries < 0)
2942                         return eth_err(port_id, cnt_driver_entries);
2943                 cnt_used_entries += cnt_driver_entries;
2944         }
2945
2946         return cnt_used_entries;
2947 }
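
/*
 * Usage sketch (illustrative only, not part of the library): the two-call
 * sizing pattern for fetching all xstats names. A NULL array, or a size
 * smaller than needed, makes the call return the required entry count.
 * A valid, configured port_id and <stdio.h>/<stdlib.h> are assumed.
 *
 *	int i, nb = rte_eth_xstats_get_names(port_id, NULL, 0);
 *	struct rte_eth_xstat_name *names;
 *
 *	if (nb < 0)
 *		return nb;
 *	names = malloc(sizeof(*names) * nb);
 *	if (names == NULL)
 *		return -ENOMEM;
 *	nb = rte_eth_xstats_get_names(port_id, names, nb);
 *	for (i = 0; i < nb; i++)
 *		printf("%s\n", names[i].name);
 *	free(names);
 */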
2948
2949
2950 static int
2951 rte_eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
2952 {
2953         struct rte_eth_dev *dev;
2954         struct rte_eth_stats eth_stats;
2955         unsigned int count = 0, i, q;
2956         uint64_t val, *stats_ptr;
2957         uint16_t nb_rxqs, nb_txqs;
2958         int ret;
2959
2960         ret = rte_eth_stats_get(port_id, &eth_stats);
2961         if (ret < 0)
2962                 return ret;
2963
2964         dev = &rte_eth_devices[port_id];
2965
2966         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2967         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2968
2969         /* global stats */
2970         for (i = 0; i < RTE_NB_STATS; i++) {
2971                 stats_ptr = RTE_PTR_ADD(&eth_stats,
2972                                         rte_stats_strings[i].offset);
2973                 val = *stats_ptr;
2974                 xstats[count++].value = val;
2975         }
2976
2977         /* per-rxq stats */
2978         for (q = 0; q < nb_rxqs; q++) {
2979                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
2980                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2981                                         rte_rxq_stats_strings[i].offset +
2982                                         q * sizeof(uint64_t));
2983                         val = *stats_ptr;
2984                         xstats[count++].value = val;
2985                 }
2986         }
2987
2988         /* per-txq stats */
2989         for (q = 0; q < nb_txqs; q++) {
2990                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
2991                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2992                                         rte_txq_stats_strings[i].offset +
2993                                         q * sizeof(uint64_t));
2994                         val = *stats_ptr;
2995                         xstats[count++].value = val;
2996                 }
2997         }
2998         return count;
2999 }
3000
3001 /* retrieve ethdev extended statistics */
3002 int
3003 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
3004                          uint64_t *values, unsigned int size)
3005 {
3006         unsigned int no_basic_stat_requested = 1;
3007         unsigned int no_ext_stat_requested = 1;
3008         unsigned int num_xstats_filled;
3009         unsigned int basic_count;
3010         uint16_t expected_entries;
3011         struct rte_eth_dev *dev;
3012         unsigned int i;
3013         int ret;
3014
3015         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3016         ret = get_xstats_count(port_id);
3017         if (ret < 0)
3018                 return ret;
3019         expected_entries = (uint16_t)ret;
3020         struct rte_eth_xstat xstats[expected_entries];
3021         dev = &rte_eth_devices[port_id];
3022         basic_count = get_xstats_basic_count(dev);
3023
3024         /* Return max number of stats if no ids given */
3025         if (!ids) {
3026                 if (!values || size < expected_entries)
3027                         return expected_entries;
3028         }
3031
3032         if (ids && !values)
3033                 return -EINVAL;
3034
3035         if (ids && dev->dev_ops->xstats_get_by_id != NULL && size > 0) {
3037                 uint64_t ids_copy[size];
3038
3039                 for (i = 0; i < size; i++) {
3040                         if (ids[i] < basic_count) {
3041                                 no_basic_stat_requested = 0;
3042                                 break;
3043                         }
3044
3045                         /*
3046                          * Convert ids to the xstats ids that the PMD knows;
3047                          * ids seen by the user cover basic + extended stats.
3048                          */
3049                         ids_copy[i] = ids[i] - basic_count;
3050                 }
3051
3052                 if (no_basic_stat_requested)
3053                         return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
3054                                         values, size);
3055         }
3056
3057         if (ids) {
3058                 for (i = 0; i < size; i++) {
3059                         if (ids[i] >= basic_count) {
3060                                 no_ext_stat_requested = 0;
3061                                 break;
3062                         }
3063                 }
3064         }
3065
3066         /* Fill the xstats structure */
3067         if (ids && no_ext_stat_requested)
3068                 ret = rte_eth_basic_stats_get(port_id, xstats);
3069         else
3070                 ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
3071
3072         if (ret < 0)
3073                 return ret;
3074         num_xstats_filled = (unsigned int)ret;
3075
3076         /* Return all stats */
3077         if (!ids) {
3078                 for (i = 0; i < num_xstats_filled; i++)
3079                         values[i] = xstats[i].value;
3080                 return expected_entries;
3081         }
3082
3083         /* Filter stats */
3084         for (i = 0; i < size; i++) {
3085                 if (ids[i] >= expected_entries) {
3086                         RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
3087                         return -1;
3088                 }
3089                 values[i] = xstats[ids[i]].value;
3090         }
3091         return size;
3092 }
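
/*
 * Usage sketch (illustrative only): fetching two selected counters by id.
 * Ids below the basic count address the generic stats above; driver
 * xstats follow. The ids 0 and 6 (rx_good_packets and tx_errors, per
 * rte_stats_strings) and a valid port_id are assumptions of the example.
 *
 *	uint64_t ids[2] = { 0, 6 };
 *	uint64_t values[2];
 *	int ret = rte_eth_xstats_get_by_id(port_id, ids, values, 2);
 *
 *	if (ret == 2)
 *		printf("rx_good_packets=%" PRIu64 " tx_errors=%" PRIu64 "\n",
 *		       values[0], values[1]);
 */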
3093
3094 int
3095 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
3096         unsigned int n)
3097 {
3098         struct rte_eth_dev *dev;
3099         unsigned int count = 0, i;
3100         signed int xcount = 0;
3101         uint16_t nb_rxqs, nb_txqs;
3102         int ret;
3103
3104         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3105
3106         dev = &rte_eth_devices[port_id];
3107
3108         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3109         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3110
3111         /* Count the generic statistics */
3112         count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
3113                 (nb_txqs * RTE_NB_TXQ_STATS);
3114
3115         /* implemented by the driver */
3116         if (dev->dev_ops->xstats_get != NULL) {
3117                 /* Retrieve the driver-specific xstats and append them
3118                  * at the end of the xstats array.
3119                  */
3120                 xcount = (*dev->dev_ops->xstats_get)(dev,
3121                                      xstats ? xstats + count : NULL,
3122                                      (n > count) ? n - count : 0);
3123
3124                 if (xcount < 0)
3125                         return eth_err(port_id, xcount);
3126         }
3127
3128         if (n < count + xcount || xstats == NULL)
3129                 return count + xcount;
3130
3131         /* now fill the xstats structure */
3132         ret = rte_eth_basic_stats_get(port_id, xstats);
3133         if (ret < 0)
3134                 return ret;
3135         count = ret;
3136
3137         for (i = 0; i < count; i++)
3138                 xstats[i].id = i;
3139         /* add an offset to driver-specific stats */
3140         for ( ; i < count + xcount; i++)
3141                 xstats[i].id += count;
3142
3143         return count + xcount;
3144 }
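
/*
 * Usage sketch (illustrative only): names and values share one id space,
 * so a full dump can pair the two arrays through the returned ids. nb is
 * assumed to come from a prior sizing call, as in the sketch above.
 *
 *	struct rte_eth_xstat xstats[nb];
 *	struct rte_eth_xstat_name names[nb];
 *	int i;
 *
 *	rte_eth_xstats_get_names(port_id, names, nb);
 *	rte_eth_xstats_get(port_id, xstats, nb);
 *	for (i = 0; i < nb; i++)
 *		printf("%s: %" PRIu64 "\n",
 *		       names[xstats[i].id].name, xstats[i].value);
 */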
3145
3146 /* reset ethdev extended statistics */
3147 int
3148 rte_eth_xstats_reset(uint16_t port_id)
3149 {
3150         struct rte_eth_dev *dev;
3151
3152         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3153         dev = &rte_eth_devices[port_id];
3154
3155         /* implemented by the driver */
3156         if (dev->dev_ops->xstats_reset != NULL)
3157                 return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev));
3158
3159         /* fallback to default */
3160         return rte_eth_stats_reset(port_id);
3161 }
3162
3163 static int
3164 set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx,
3165                 uint8_t is_rx)
3166 {
3167         struct rte_eth_dev *dev;
3168
3169         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3170
3171         dev = &rte_eth_devices[port_id];
3172
3173         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
3174
3175         if (is_rx && (queue_id >= dev->data->nb_rx_queues))
3176                 return -EINVAL;
3177
3178         if (!is_rx && (queue_id >= dev->data->nb_tx_queues))
3179                 return -EINVAL;
3180
3181         if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
3182                 return -EINVAL;
3183
3184         return (*dev->dev_ops->queue_stats_mapping_set)
3185                         (dev, queue_id, stat_idx, is_rx);
3186 }
3187
3188
3189 int
3190 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
3191                 uint8_t stat_idx)
3192 {
3193         return eth_err(port_id, set_queue_stats_mapping(port_id, tx_queue_id,
3194                                                 stat_idx, STAT_QMAP_TX));
3195 }
3196
3197
3198 int
3199 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
3200                 uint8_t stat_idx)
3201 {
3202         return eth_err(port_id, set_queue_stats_mapping(port_id, rx_queue_id,
3203                                                 stat_idx, STAT_QMAP_RX));
3204 }
3205
3206 int
3207 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
3208 {
3209         struct rte_eth_dev *dev;
3210
3211         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3212         dev = &rte_eth_devices[port_id];
3213
3214         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
3215         return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
3216                                                         fw_version, fw_size));
3217 }
3218
3219 int
3220 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
3221 {
3222         struct rte_eth_dev *dev;
3223         const struct rte_eth_desc_lim lim = {
3224                 .nb_max = UINT16_MAX,
3225                 .nb_min = 0,
3226                 .nb_align = 1,
3227                 .nb_seg_max = UINT16_MAX,
3228                 .nb_mtu_seg_max = UINT16_MAX,
3229         };
3230         int diag;
3231
3232         /*
3233          * Init dev_info before the port_id check so that a caller which
3234          * ignores the return status never reads uninitialized contents.
3235          */
3236         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3237         dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
3238
3239         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3240         dev = &rte_eth_devices[port_id];
3241
3242         dev_info->rx_desc_lim = lim;
3243         dev_info->tx_desc_lim = lim;
3244         dev_info->device = dev->device;
3245         dev_info->min_mtu = RTE_ETHER_MIN_MTU;
3246         dev_info->max_mtu = UINT16_MAX;
3247
3248         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
3249         diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info);
3250         if (diag != 0) {
3251                 /* Cleanup already filled in device information */
3252                 memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3253                 return eth_err(port_id, diag);
3254         }
3255
3256         /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */
3257         dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues,
3258                         RTE_MAX_QUEUES_PER_PORT);
3259         dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues,
3260                         RTE_MAX_QUEUES_PER_PORT);
3261
3262         dev_info->driver_name = dev->device->driver->name;
3263         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
3264         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
3265
3266         dev_info->dev_flags = &dev->data->dev_flags;
3267
3268         return 0;
3269 }
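
/*
 * Usage sketch (illustrative only): querying device limits before
 * configuration. The queue counts and the populated port_conf structure
 * are assumptions of the example.
 *
 *	struct rte_eth_dev_info dev_info;
 *	uint16_t nb_rxq = 4, nb_txq = 4;
 *	int ret = rte_eth_dev_info_get(port_id, &dev_info);
 *
 *	if (ret != 0)
 *		return ret;
 *	nb_rxq = RTE_MIN(nb_rxq, dev_info.max_rx_queues);
 *	nb_txq = RTE_MIN(nb_txq, dev_info.max_tx_queues);
 *	ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
 */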
3270
3271 int
3272 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3273                                  uint32_t *ptypes, int num)
3274 {
3275         int i, j;
3276         struct rte_eth_dev *dev;
3277         const uint32_t *all_ptypes;
3278
3279         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3280         dev = &rte_eth_devices[port_id];
3281         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
3282         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3283
3284         if (!all_ptypes)
3285                 return 0;
3286
3287         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
3288                 if (all_ptypes[i] & ptype_mask) {
3289                         if (j < num)
3290                                 ptypes[j] = all_ptypes[i];
3291                         j++;
3292                 }
3293
3294         return j;
3295 }
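
/*
 * Usage sketch (illustrative only): listing the L4 packet types a driver
 * can recognize. The return value is the total number of matching ptypes
 * even when only num entries fit, so it can also size a second call. The
 * array size of 16 is an assumption.
 *
 *	uint32_t ptypes[16];
 *	int i, n = rte_eth_dev_get_supported_ptypes(port_id,
 *			RTE_PTYPE_L4_MASK, ptypes, RTE_DIM(ptypes));
 *
 *	for (i = 0; i < n && i < (int)RTE_DIM(ptypes); i++)
 *		printf("ptype 0x%08x\n", ptypes[i]);
 */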
3296
3297 int
3298 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3299                                  uint32_t *set_ptypes, unsigned int num)
3300 {
3301         const uint32_t valid_ptype_masks[] = {
3302                 RTE_PTYPE_L2_MASK,
3303                 RTE_PTYPE_L3_MASK,
3304                 RTE_PTYPE_L4_MASK,
3305                 RTE_PTYPE_TUNNEL_MASK,
3306                 RTE_PTYPE_INNER_L2_MASK,
3307                 RTE_PTYPE_INNER_L3_MASK,
3308                 RTE_PTYPE_INNER_L4_MASK,
3309         };
3310         const uint32_t *all_ptypes;
3311         struct rte_eth_dev *dev;
3312         uint32_t unused_mask;
3313         unsigned int i, j;
3314         int ret;
3315
3316         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3317         dev = &rte_eth_devices[port_id];
3318
3319         if (num > 0 && set_ptypes == NULL)
3320                 return -EINVAL;
3321
3322         if (*dev->dev_ops->dev_supported_ptypes_get == NULL ||
3323                         *dev->dev_ops->dev_ptypes_set == NULL) {
3324                 ret = 0;
3325                 goto ptype_unknown;
3326         }
3327
3328         if (ptype_mask == 0) {
3329                 ret = (*dev->dev_ops->dev_ptypes_set)(dev,
3330                                 ptype_mask);
3331                 goto ptype_unknown;
3332         }
3333
3334         unused_mask = ptype_mask;
3335         for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) {
3336                 uint32_t mask = ptype_mask & valid_ptype_masks[i];
3337                 if (mask && mask != valid_ptype_masks[i]) {
3338                         ret = -EINVAL;
3339                         goto ptype_unknown;
3340                 }
3341                 unused_mask &= ~valid_ptype_masks[i];
3342         }
3343
3344         if (unused_mask) {
3345                 ret = -EINVAL;
3346                 goto ptype_unknown;
3347         }
3348
3349         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3350         if (all_ptypes == NULL) {
3351                 ret = 0;
3352                 goto ptype_unknown;
3353         }
3354
3355         /*
3356          * Accommodate as many set_ptypes as possible. If the supplied
3357          * set_ptypes array is too small, fill it partially.
3358          */
3359         for (i = 0, j = 0; set_ptypes != NULL &&
3360                                 (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) {
3361                 if (ptype_mask & all_ptypes[i]) {
3362                         if (j + 1 < num) {
3363                                 set_ptypes[j] = all_ptypes[i];
3364                                 j++;
3365                                 continue;
3366                         }
3367                         break;
3368                 }
3369         }
3370
3371         if (set_ptypes != NULL && j < num)
3372                 set_ptypes[j] = RTE_PTYPE_UNKNOWN;
3373
3374         return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask);
3375
3376 ptype_unknown:
3377         if (num > 0)
3378                 set_ptypes[0] = RTE_PTYPE_UNKNOWN;
3379
3380         return ret;
3381 }
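
/*
 * Usage sketch (illustrative only): an application that does not consume
 * packet type information can pass RTE_PTYPE_UNKNOWN (zero) as the mask
 * so the driver may skip ptype parsing altogether.
 *
 *	uint32_t set_ptypes[1];
 *	int ret = rte_eth_dev_set_ptypes(port_id, RTE_PTYPE_UNKNOWN,
 *			set_ptypes, RTE_DIM(set_ptypes));
 */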
3382
3383 int
3384 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
3385 {
3386         struct rte_eth_dev *dev;
3387
3388         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3389         dev = &rte_eth_devices[port_id];
3390         rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
3391
3392         return 0;
3393 }
3394
3395 int
3396 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
3397 {
3398         struct rte_eth_dev *dev;
3399
3400         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3401
3402         dev = &rte_eth_devices[port_id];
3403         *mtu = dev->data->mtu;
3404         return 0;
3405 }
3406
3407 int
3408 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
3409 {
3410         int ret;
3411         struct rte_eth_dev_info dev_info;
3412         struct rte_eth_dev *dev;
3413
3414         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3415         dev = &rte_eth_devices[port_id];
3416         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
3417
3418         /*
3419          * Check if the device supports dev_infos_get; if it does not,
3420          * skip the min_mtu/max_mtu validation here, as it requires values
3421          * populated by rte_eth_dev_info_get(), which in turn relies on
3422          * dev->dev_ops->dev_infos_get.
3423          */
3424         if (*dev->dev_ops->dev_infos_get != NULL) {
3425                 ret = rte_eth_dev_info_get(port_id, &dev_info);
3426                 if (ret != 0)
3427                         return ret;
3428
3429                 if (mtu < dev_info.min_mtu || mtu > dev_info.max_mtu)
3430                         return -EINVAL;
3431         }
3432
3433         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
3434         if (!ret)
3435                 dev->data->mtu = mtu;
3436
3437         return eth_err(port_id, ret);
3438 }
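
/*
 * Usage sketch (illustrative only): raising the MTU for jumbo frames.
 * The 9000-byte value is an assumption; out-of-range values fail with
 * -EINVAL when the driver reports min_mtu/max_mtu via dev_infos_get.
 *
 *	int ret = rte_eth_dev_set_mtu(port_id, 9000);
 *
 *	if (ret != 0)
 *		printf("cannot set MTU on port %u: %d\n", port_id, ret);
 */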
3439
3440 int
3441 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
3442 {
3443         struct rte_eth_dev *dev;
3444         int ret;
3445
3446         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3447         dev = &rte_eth_devices[port_id];
3448         if (!(dev->data->dev_conf.rxmode.offloads &
3449               DEV_RX_OFFLOAD_VLAN_FILTER)) {
3450                 RTE_ETHDEV_LOG(ERR, "Port %u: vlan-filtering disabled\n",
3451                         port_id);
3452                 return -ENOSYS;
3453         }
3454
3455         if (vlan_id > 4095) {
3456                 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n",
3457                         port_id, vlan_id);
3458                 return -EINVAL;
3459         }
3460         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
3461
3462         ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
3463         if (ret == 0) {
3464                 struct rte_vlan_filter_conf *vfc;
3465                 int vidx;
3466                 int vbit;
3467
3468                 vfc = &dev->data->vlan_filter_conf;
3469                 vidx = vlan_id / 64;
3470                 vbit = vlan_id % 64;
3471
3472                 if (on)
3473                         vfc->ids[vidx] |= UINT64_C(1) << vbit;
3474                 else
3475                         vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
3476         }
3477
3478         return eth_err(port_id, ret);
3479 }
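
/*
 * Usage sketch (illustrative only): VLAN filtering must be enabled in the
 * Rx offloads at configure time before ids can be added. VLAN id 100 and
 * the single-queue setup are assumptions of the example.
 *
 *	struct rte_eth_conf port_conf = { 0 };
 *	int ret;
 *
 *	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
 *	ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
 *	if (ret == 0)
 *		ret = rte_eth_dev_vlan_filter(port_id, 100, 1);
 */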
3480
3481 int
3482 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3483                                     int on)
3484 {
3485         struct rte_eth_dev *dev;
3486
3487         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3488         dev = &rte_eth_devices[port_id];
3489         if (rx_queue_id >= dev->data->nb_rx_queues) {
3490                 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id);
3491                 return -EINVAL;
3492         }
3493
3494         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
3495         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
3496
3497         return 0;
3498 }
3499
3500 int
3501 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
3502                                 enum rte_vlan_type vlan_type,
3503                                 uint16_t tpid)
3504 {
3505         struct rte_eth_dev *dev;
3506
3507         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3508         dev = &rte_eth_devices[port_id];
3509         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
3510
3511         return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
3512                                                                tpid));
3513 }
3514
3515 int
3516 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
3517 {
3518         struct rte_eth_dev_info dev_info;
3519         struct rte_eth_dev *dev;
3520         int ret = 0;
3521         int mask = 0;
3522         int cur, org = 0;
3523         uint64_t orig_offloads;
3524         uint64_t dev_offloads;
3525         uint64_t new_offloads;
3526
3527         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3528         dev = &rte_eth_devices[port_id];
3529
3530         /* save original values in case of failure */
3531         orig_offloads = dev->data->dev_conf.rxmode.offloads;
3532         dev_offloads = orig_offloads;
3533
3534         /* check which options were changed by the application */
3535         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
3536         org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
3537         if (cur != org) {
3538                 if (cur)
3539                         dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
3540                 else
3541                         dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
3542                 mask |= ETH_VLAN_STRIP_MASK;
3543         }
3544
3545         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
3546         org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
3547         if (cur != org) {
3548                 if (cur)
3549                         dev_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3550                 else
3551                         dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
3552                 mask |= ETH_VLAN_FILTER_MASK;
3553         }
3554
3555         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
3556         org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND);
3557         if (cur != org) {
3558                 if (cur)
3559                         dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
3560                 else
3561                         dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
3562                 mask |= ETH_VLAN_EXTEND_MASK;
3563         }
3564
3565         cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD);
3566         org = !!(dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP);
3567         if (cur != org) {
3568                 if (cur)
3569                         dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
3570                 else
3571                         dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
3572                 mask |= ETH_QINQ_STRIP_MASK;
3573         }
3574
3575         /* no change */
3576         if (mask == 0)
3577                 return ret;
3578
3579         ret = rte_eth_dev_info_get(port_id, &dev_info);
3580         if (ret != 0)
3581                 return ret;
3582
3583         /* Requested Rx VLAN offloads must be within device capabilities */
3584         if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) {
3585                 new_offloads = dev_offloads & ~orig_offloads;
3586                 RTE_ETHDEV_LOG(ERR,
3587                         "Ethdev port_id=%u newly requested VLAN offloads "
3588                         "0x%" PRIx64 " must be within Rx offloads capabilities "
3589                         "0x%" PRIx64 " in %s()\n",
3590                         port_id, new_offloads, dev_info.rx_offload_capa,
3591                         __func__);
3592                 return -EINVAL;
3593         }
3594
3595         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
3596         dev->data->dev_conf.rxmode.offloads = dev_offloads;
3597         ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
3598         if (ret) {
3599                 /* hit an error, restore the original values */
3600                 dev->data->dev_conf.rxmode.offloads = orig_offloads;
3601         }
3602
3603         return eth_err(port_id, ret);
3604 }
3605
3606 int
3607 rte_eth_dev_get_vlan_offload(uint16_t port_id)
3608 {
3609         struct rte_eth_dev *dev;
3610         uint64_t *dev_offloads;
3611         int ret = 0;
3612
3613         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3614         dev = &rte_eth_devices[port_id];
3615         dev_offloads = &dev->data->dev_conf.rxmode.offloads;
3616
3617         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
3618                 ret |= ETH_VLAN_STRIP_OFFLOAD;
3619
3620         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
3621                 ret |= ETH_VLAN_FILTER_OFFLOAD;
3622
3623         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
3624                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
3625
3626         if (*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
3627                 ret |= ETH_QINQ_STRIP_OFFLOAD;
3628
3629         return ret;
3630 }
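
/*
 * Usage sketch (illustrative only): the offload mask is read-modify-write;
 * fetch the current mask, flip the bit of interest and write the whole
 * mask back. Enabling VLAN stripping here is an example choice.
 *
 *	int mask = rte_eth_dev_get_vlan_offload(port_id);
 *
 *	if (mask < 0)
 *		return mask;
 *	mask |= ETH_VLAN_STRIP_OFFLOAD;
 *	return rte_eth_dev_set_vlan_offload(port_id, mask);
 */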
3631
3632 int
3633 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
3634 {
3635         struct rte_eth_dev *dev;
3636
3637         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3638         dev = &rte_eth_devices[port_id];
3639         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
3640
3641         return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
3642 }
3643
3644 int
3645 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3646 {
3647         struct rte_eth_dev *dev;
3648
3649         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3650         dev = &rte_eth_devices[port_id];
3651         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
3652         memset(fc_conf, 0, sizeof(*fc_conf));
3653         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
3654 }
3655
3656 int
3657 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3658 {
3659         struct rte_eth_dev *dev;
3660
3661         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3662         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
3663                 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n");
3664                 return -EINVAL;
3665         }
3666
3667         dev = &rte_eth_devices[port_id];
3668         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
3669         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
3670 }
3671
3672 int
3673 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
3674                                    struct rte_eth_pfc_conf *pfc_conf)
3675 {
3676         struct rte_eth_dev *dev;
3677
3678         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3679         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
3680                 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
3681                 return -EINVAL;
3682         }
3683
3684         dev = &rte_eth_devices[port_id];
3685         /* High water / low water validation is device-specific */
3686         if (*dev->dev_ops->priority_flow_ctrl_set)
3687                 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
3688                                         (dev, pfc_conf));
3689         return -ENOTSUP;
3690 }
3691
3692 static int
3693 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
3694                         uint16_t reta_size)
3695 {
3696         uint16_t i, num;
3697
3698         if (!reta_conf)
3699                 return -EINVAL;
3700
3701         num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
3702         for (i = 0; i < num; i++) {
3703                 if (reta_conf[i].mask)
3704                         return 0;
3705         }
3706
3707         return -EINVAL;
3708 }
3709
3710 static int
3711 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
3712                          uint16_t reta_size,
3713                          uint16_t max_rxq)
3714 {
3715         uint16_t i, idx, shift;
3716
3717         if (!reta_conf)
3718                 return -EINVAL;
3719
3720         if (max_rxq == 0) {
3721                 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n");
3722                 return -EINVAL;
3723         }
3724
3725         for (i = 0; i < reta_size; i++) {
3726                 idx = i / RTE_RETA_GROUP_SIZE;
3727                 shift = i % RTE_RETA_GROUP_SIZE;
3728                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
3729                         (reta_conf[idx].reta[shift] >= max_rxq)) {
3730                         RTE_ETHDEV_LOG(ERR,
3731                                 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n",
3732                                 idx, shift,
3733                                 reta_conf[idx].reta[shift], max_rxq);
3734                         return -EINVAL;
3735                 }
3736         }
3737
3738         return 0;
3739 }
3740
3741 int
3742 rte_eth_dev_rss_reta_update(uint16_t port_id,
3743                             struct rte_eth_rss_reta_entry64 *reta_conf,
3744                             uint16_t reta_size)
3745 {
3746         struct rte_eth_dev *dev;
3747         int ret;
3748
3749         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3750         /* Check mask bits */
3751         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
3752         if (ret < 0)
3753                 return ret;
3754
3755         dev = &rte_eth_devices[port_id];
3756
3757         /* Check entry value */
3758         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
3759                                 dev->data->nb_rx_queues);
3760         if (ret < 0)
3761                 return ret;
3762
3763         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
3764         return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
3765                                                              reta_size));
3766 }
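
/*
 * Usage sketch (illustrative only): each rte_eth_rss_reta_entry64 covers
 * RTE_RETA_GROUP_SIZE entries, addressed with the idx/shift split used in
 * the checks above. This spreads the table round-robin over nb_rx_queues;
 * reta_size is assumed to come from dev_info.reta_size and to be a
 * multiple of RTE_RETA_GROUP_SIZE.
 *
 *	struct rte_eth_rss_reta_entry64 reta_conf[reta_size /
 *						  RTE_RETA_GROUP_SIZE];
 *	uint16_t i;
 *
 *	memset(reta_conf, 0, sizeof(reta_conf));
 *	for (i = 0; i < reta_size; i++) {
 *		uint16_t idx = i / RTE_RETA_GROUP_SIZE;
 *		uint16_t shift = i % RTE_RETA_GROUP_SIZE;
 *
 *		reta_conf[idx].mask |= UINT64_C(1) << shift;
 *		reta_conf[idx].reta[shift] = i % nb_rx_queues;
 *	}
 *	ret = rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
 */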
3767
3768 int
3769 rte_eth_dev_rss_reta_query(uint16_t port_id,
3770                            struct rte_eth_rss_reta_entry64 *reta_conf,
3771                            uint16_t reta_size)
3772 {
3773         struct rte_eth_dev *dev;
3774         int ret;
3775
3776         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3777
3778         /* Check mask bits */
3779         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
3780         if (ret < 0)
3781                 return ret;
3782
3783         dev = &rte_eth_devices[port_id];
3784         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
3785         return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
3786                                                             reta_size));
3787 }
3788
3789 int
3790 rte_eth_dev_rss_hash_update(uint16_t port_id,
3791                             struct rte_eth_rss_conf *rss_conf)
3792 {
3793         struct rte_eth_dev *dev;
3794         struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
3795         int ret;
3796
3797         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3798
3799         ret = rte_eth_dev_info_get(port_id, &dev_info);
3800         if (ret != 0)
3801                 return ret;
3802
3803         rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf);
3804
3805         dev = &rte_eth_devices[port_id];
3806         if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
3807             dev_info.flow_type_rss_offloads) {
3808                 RTE_ETHDEV_LOG(ERR,
3809                         "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
3810                         port_id, rss_conf->rss_hf,
3811                         dev_info.flow_type_rss_offloads);
3812                 return -EINVAL;
3813         }
3814         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
3815         return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
3816                                                                  rss_conf));
3817 }
3818
3819 int
3820 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
3821                               struct rte_eth_rss_conf *rss_conf)
3822 {
3823         struct rte_eth_dev *dev;
3824
3825         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3826         dev = &rte_eth_devices[port_id];
3827         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
3828         return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
3829                                                                    rss_conf));
3830 }
3831
3832 int
3833 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
3834                                 struct rte_eth_udp_tunnel *udp_tunnel)
3835 {
3836         struct rte_eth_dev *dev;
3837
3838         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3839         if (udp_tunnel == NULL) {
3840                 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
3841                 return -EINVAL;
3842         }
3843
3844         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
3845                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
3846                 return -EINVAL;
3847         }
3848
3849         dev = &rte_eth_devices[port_id];
3850         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
3851         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
3852                                                                 udp_tunnel));
3853 }
3854
3855 int
3856 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
3857                                    struct rte_eth_udp_tunnel *udp_tunnel)
3858 {
3859         struct rte_eth_dev *dev;
3860
3861         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3862         dev = &rte_eth_devices[port_id];
3863
3864         if (udp_tunnel == NULL) {
3865                 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
3866                 return -EINVAL;
3867         }
3868
3869         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
3870                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
3871                 return -EINVAL;
3872         }
3873
3874         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
3875         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
3876                                                                 udp_tunnel));
3877 }
3878
3879 int
3880 rte_eth_led_on(uint16_t port_id)
3881 {
3882         struct rte_eth_dev *dev;
3883
3884         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3885         dev = &rte_eth_devices[port_id];
3886         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
3887         return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
3888 }
3889
3890 int
3891 rte_eth_led_off(uint16_t port_id)
3892 {
3893         struct rte_eth_dev *dev;
3894
3895         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3896         dev = &rte_eth_devices[port_id];
3897         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
3898         return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
3899 }
3900
3901 int
3902 rte_eth_fec_get_capability(uint16_t port_id,
3903                            struct rte_eth_fec_capa *speed_fec_capa,
3904                            unsigned int num)
3905 {
3906         struct rte_eth_dev *dev;
3907         int ret;
3908
3909         if (speed_fec_capa == NULL && num > 0)
3910                 return -EINVAL;
3911
3912         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3913         dev = &rte_eth_devices[port_id];
3914         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get_capability, -ENOTSUP);
3915         ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num);
3916
3917         return ret;
3918 }
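
/*
 * Usage sketch (illustrative only): sizing the FEC capability array with
 * a first call (a NULL array with num 0 returns the required count), then
 * fetching the entries.
 *
 *	int n = rte_eth_fec_get_capability(port_id, NULL, 0);
 *
 *	if (n > 0) {
 *		struct rte_eth_fec_capa capa[n];
 *
 *		n = rte_eth_fec_get_capability(port_id, capa, n);
 *	}
 */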
3919
3920 int
3921 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa)
3922 {
3923         struct rte_eth_dev *dev;
3924
3925         if (fec_capa == NULL)
3926                 return -EINVAL;
3927
3928         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3929         dev = &rte_eth_devices[port_id];
3930         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get, -ENOTSUP);
3931         return eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa));
3932 }
3933
3934 int
3935 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa)
3936 {
3937         struct rte_eth_dev *dev;
3938
3939         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3940         dev = &rte_eth_devices[port_id];
3941         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_set, -ENOTSUP);
3942         return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa));
3943 }
3944
3945 /*
3946  * Returns the index of addr in the MAC address array, or -1 if not found.
3947  * Pass 00:00:00:00:00:00 to find an empty slot.
3948  */
3949 static int
3950 get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
3951 {
3952         struct rte_eth_dev_info dev_info;
3953         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3954         unsigned i;
3955         int ret;
3956
3957         ret = rte_eth_dev_info_get(port_id, &dev_info);
3958         if (ret != 0)
3959                 return -1;
3960
3961         for (i = 0; i < dev_info.max_mac_addrs; i++)
3962                 if (memcmp(addr, &dev->data->mac_addrs[i],
3963                                 RTE_ETHER_ADDR_LEN) == 0)
3964                         return i;
3965
3966         return -1;
3967 }
3968
3969 static const struct rte_ether_addr null_mac_addr;
3970
3971 int
3972 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
3973                         uint32_t pool)
3974 {
3975         struct rte_eth_dev *dev;
3976         int index;
3977         uint64_t pool_mask;
3978         int ret;
3979
3980         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3981         dev = &rte_eth_devices[port_id];
3982         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
3983
3984         if (rte_is_zero_ether_addr(addr)) {
3985                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
3986                         port_id);
3987                 return -EINVAL;
3988         }
3989         if (pool >= ETH_64_POOLS) {
3990                 RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", ETH_64_POOLS - 1);
3991                 return -EINVAL;
3992         }
3993
3994         index = get_mac_addr_index(port_id, addr);
3995         if (index < 0) {
3996                 index = get_mac_addr_index(port_id, &null_mac_addr);
3997                 if (index < 0) {
3998                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
3999                                 port_id);
4000                         return -ENOSPC;
4001                 }
4002         } else {
4003                 pool_mask = dev->data->mac_pool_sel[index];
4004
4005                 /* If both the MAC address and pool are already there, do nothing */
4006                 if (pool_mask & (1ULL << pool))
4007                         return 0;
4008         }
4009
4010         /* Update NIC */
4011         ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
4012
4013         if (ret == 0) {
4014                 /* Update address in NIC data structure */
4015                 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]);
4016
4017                 /* Update pool bitmap in NIC data structure */
4018                 dev->data->mac_pool_sel[index] |= (1ULL << pool);
4019         }
4020
4021         return eth_err(port_id, ret);
4022 }
4023
4024 int
4025 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr)
4026 {
4027         struct rte_eth_dev *dev;
4028         int index;
4029
4030         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4031         dev = &rte_eth_devices[port_id];
4032         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
4033
4034         index = get_mac_addr_index(port_id, addr);
4035         if (index == 0) {
4036                 RTE_ETHDEV_LOG(ERR,
4037                         "Port %u: Cannot remove default MAC address\n",
4038                         port_id);
4039                 return -EADDRINUSE;
4040         } else if (index < 0)
4041                 return 0;  /* Do nothing if address wasn't found */
4042
4043         /* Update NIC */
4044         (*dev->dev_ops->mac_addr_remove)(dev, index);
4045
4046         /* Update address in NIC data structure */
4047         rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
4048
4049         /* reset pool bitmap */
4050         dev->data->mac_pool_sel[index] = 0;
4051
4052         return 0;
4053 }
4054
4055 int
4056 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr)
4057 {
4058         struct rte_eth_dev *dev;
4059         int ret;
4060
4061         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4062
4063         if (!rte_is_valid_assigned_ether_addr(addr))
4064                 return -EINVAL;
4065
4066         dev = &rte_eth_devices[port_id];
4067         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
4068
4069         ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
4070         if (ret < 0)
4071                 return ret;
4072
4073         /* Update default address in NIC data structure */
4074         rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
4075
4076         return 0;
4077 }
4078
4079
4080 /*
4081  * Returns the index of addr in the hash MAC address array, or -1 if not
4082  * found. Pass 00:00:00:00:00:00 to find an empty slot.
4083  */
4084 static int
4085 get_hash_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
4086 {
4087         struct rte_eth_dev_info dev_info;
4088         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4089         unsigned i;
4090         int ret;
4091
4092         ret = rte_eth_dev_info_get(port_id, &dev_info);
4093         if (ret != 0)
4094                 return -1;
4095
4096         if (!dev->data->hash_mac_addrs)
4097                 return -1;
4098
4099         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
4100                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
4101                         RTE_ETHER_ADDR_LEN) == 0)
4102                         return i;
4103
4104         return -1;
4105 }
4106
4107 int
4108 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
4109                                 uint8_t on)
4110 {
4111         int index;
4112         int ret;
4113         struct rte_eth_dev *dev;
4114
4115         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4116
4117         dev = &rte_eth_devices[port_id];
4118         if (rte_is_zero_ether_addr(addr)) {
4119                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
4120                         port_id);
4121                 return -EINVAL;
4122         }
4123
4124         index = get_hash_mac_addr_index(port_id, addr);
4125         /* Check if it's already there, and do nothing */
4126         if ((index >= 0) && on)
4127                 return 0;
4128
4129         if (index < 0) {
4130                 if (!on) {
4131                         RTE_ETHDEV_LOG(ERR,
4132                                 "Port %u: the MAC address was not set in UTA\n",
4133                                 port_id);
4134                         return -EINVAL;
4135                 }
4136
4137                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
4138                 if (index < 0) {
4139                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
4140                                 port_id);
4141                         return -ENOSPC;
4142                 }
4143         }
4144
4145         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
4146         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
4147         if (ret == 0) {
4148                 /* Update address in NIC data structure */
4149                 if (on)
4150                         rte_ether_addr_copy(addr,
4151                                         &dev->data->hash_mac_addrs[index]);
4152                 else
4153                         rte_ether_addr_copy(&null_mac_addr,
4154                                         &dev->data->hash_mac_addrs[index]);
4155         }
4156
4157         return eth_err(port_id, ret);
4158 }
4159
4160 int
4161 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
4162 {
4163         struct rte_eth_dev *dev;
4164
4165         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4166
4167         dev = &rte_eth_devices[port_id];
4168
4169         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
4170         return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
4171                                                                        on));
4172 }
4173
4174 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
4175                                         uint16_t tx_rate)
4176 {
4177         struct rte_eth_dev *dev;
4178         struct rte_eth_dev_info dev_info;
4179         struct rte_eth_link link;
4180         int ret;
4181
4182         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4183
4184         ret = rte_eth_dev_info_get(port_id, &dev_info);
4185         if (ret != 0)
4186                 return ret;
4187
4188         dev = &rte_eth_devices[port_id];
4189         link = dev->data->dev_link;
4190
4191         if (queue_idx >= dev_info.max_tx_queues) {
4192                 RTE_ETHDEV_LOG(ERR,
4193                         "Set queue rate limit: port %u: invalid queue id=%u\n",
4194                         port_id, queue_idx);
4195                 return -EINVAL;
4196         }
4197
4198         if (tx_rate > link.link_speed) {
4199                 RTE_ETHDEV_LOG(ERR,
4200                         "Set queue rate limit: invalid tx_rate=%u, bigger than link speed=%u\n",
4201                         tx_rate, link.link_speed);
4202                 return -EINVAL;
4203         }
4204
4205         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
4206         return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
4207                                                         queue_idx, tx_rate));
4208 }
4209
4210 int
4211 rte_eth_mirror_rule_set(uint16_t port_id,
4212                         struct rte_eth_mirror_conf *mirror_conf,
4213                         uint8_t rule_id, uint8_t on)
4214 {
4215         struct rte_eth_dev *dev;
4216
4217         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4218         if (mirror_conf->rule_type == 0) {
4219                 RTE_ETHDEV_LOG(ERR, "Mirror rule type cannot be 0\n");
4220                 return -EINVAL;
4221         }
4222
4223         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
4224                 RTE_ETHDEV_LOG(ERR, "Invalid dst pool, pool id must be 0-%d\n",
4225                         ETH_64_POOLS - 1);
4226                 return -EINVAL;
4227         }
4228
4229         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
4230              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
4231             (mirror_conf->pool_mask == 0)) {
4232                 RTE_ETHDEV_LOG(ERR,
4233                         "Invalid mirror pool, pool mask cannot be 0\n");
4234                 return -EINVAL;
4235         }
4236
4237         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
4238             mirror_conf->vlan.vlan_mask == 0) {
4239                 RTE_ETHDEV_LOG(ERR,
4240                         "Invalid vlan mask, vlan mask cannot be 0\n");
4241                 return -EINVAL;
4242         }
4243
4244         dev = &rte_eth_devices[port_id];
4245         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
4246
4247         return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
4248                                                 mirror_conf, rule_id, on));
4249 }
4250
4251 int
4252 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
4253 {
4254         struct rte_eth_dev *dev;
4255
4256         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4257
4258         dev = &rte_eth_devices[port_id];
4259         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
4260
4261         return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev,
4262                                                                    rule_id));
4263 }
4264
4265 RTE_INIT(eth_dev_init_cb_lists)
4266 {
4267         int i;
4268
4269         for (i = 0; i < RTE_MAX_ETHPORTS; i++)
4270                 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
4271 }
4272
4273 int
4274 rte_eth_dev_callback_register(uint16_t port_id,
4275                         enum rte_eth_event_type event,
4276                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4277 {
4278         struct rte_eth_dev *dev;
4279         struct rte_eth_dev_callback *user_cb;
4280         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
4281         uint16_t last_port;
4282
4283         if (!cb_fn)
4284                 return -EINVAL;
4285
4286         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4287                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
4288                 return -EINVAL;
4289         }
4290
4291         if (port_id == RTE_ETH_ALL) {
4292                 next_port = 0;
4293                 last_port = RTE_MAX_ETHPORTS - 1;
4294         } else {
4295                 next_port = last_port = port_id;
4296         }
4297
4298         rte_spinlock_lock(&rte_eth_dev_cb_lock);
4299
4300         do {
4301                 dev = &rte_eth_devices[next_port];
4302
4303                 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
4304                         if (user_cb->cb_fn == cb_fn &&
4305                                 user_cb->cb_arg == cb_arg &&
4306                                 user_cb->event == event) {
4307                                 break;
4308                         }
4309                 }
4310
4311                 /* create a new callback if none was found */
4312                 if (user_cb == NULL) {
4313                         user_cb = rte_zmalloc("INTR_USER_CALLBACK",
4314                                 sizeof(struct rte_eth_dev_callback), 0);
4315                         if (user_cb != NULL) {
4316                                 user_cb->cb_fn = cb_fn;
4317                                 user_cb->cb_arg = cb_arg;
4318                                 user_cb->event = event;
4319                                 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
4320                                                   user_cb, next);
4321                         } else {
4322                                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
4323                                 rte_eth_dev_callback_unregister(port_id, event,
4324                                                                 cb_fn, cb_arg);
4325                                 return -ENOMEM;
4326                         }
4327
4328                 }
4329         } while (++next_port <= last_port);
4330
4331         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
4332         return 0;
4333 }
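
/*
 * Usage sketch (illustrative only): registering a link-status callback.
 * The callback runs without the callback lock held (see
 * rte_eth_dev_callback_process() below), so it may call ethdev APIs.
 *
 *	static int
 *	lsc_event_cb(uint16_t port_id, enum rte_eth_event_type event,
 *		     void *cb_arg, void *ret_param)
 *	{
 *		RTE_SET_USED(cb_arg);
 *		RTE_SET_USED(ret_param);
 *		printf("port %u: event %d\n", port_id, event);
 *		return 0;
 *	}
 *
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *				      lsc_event_cb, NULL);
 */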
4334
4335 int
4336 rte_eth_dev_callback_unregister(uint16_t port_id,
4337                         enum rte_eth_event_type event,
4338                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4339 {
4340         int ret;
4341         struct rte_eth_dev *dev;
4342         struct rte_eth_dev_callback *cb, *next;
4343         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
4344         uint16_t last_port;
4345
4346         if (!cb_fn)
4347                 return -EINVAL;
4348
4349         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4350                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
4351                 return -EINVAL;
4352         }
4353
4354         if (port_id == RTE_ETH_ALL) {
4355                 next_port = 0;
4356                 last_port = RTE_MAX_ETHPORTS - 1;
4357         } else {
4358                 next_port = last_port = port_id;
4359         }
4360
4361         rte_spinlock_lock(&rte_eth_dev_cb_lock);
4362
4363         do {
4364                 dev = &rte_eth_devices[next_port];
4365                 ret = 0;
4366                 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
4367                      cb = next) {
4368
4369                         next = TAILQ_NEXT(cb, next);
4370
4371                         if (cb->cb_fn != cb_fn || cb->event != event ||
4372                             (cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
4373                                 continue;
4374
4375                         /*
4376                          * if this callback is not executing right now,
4377                          * then remove it.
4378                          */
4379                         if (cb->active == 0) {
4380                                 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
4381                                 rte_free(cb);
4382                         } else {
4383                                 ret = -EAGAIN;
4384                         }
4385                 }
4386         } while (++next_port <= last_port);
4387
4388         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
4389         return ret;
4390 }
4391
4392 int
4393 rte_eth_dev_callback_process(struct rte_eth_dev *dev,
4394         enum rte_eth_event_type event, void *ret_param)
4395 {
4396         struct rte_eth_dev_callback *cb_lst;
4397         struct rte_eth_dev_callback dev_cb;
4398         int rc = 0;
4399
4400         rte_spinlock_lock(&rte_eth_dev_cb_lock);
4401         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
4402                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
4403                         continue;
4404                 dev_cb = *cb_lst;
4405                 cb_lst->active = 1;
4406                 if (ret_param != NULL)
4407                         dev_cb.ret_param = ret_param;
4408
4409                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
4410                 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
4411                                 dev_cb.cb_arg, dev_cb.ret_param);
4412                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
4413                 cb_lst->active = 0;
4414         }
4415         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
4416         return rc;
4417 }
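/*
 * Usage sketch (illustrative, not part of the library): an application
 * registers an event callback, and the PMD later delivers events through
 * rte_eth_dev_callback_process() above.
 *
 *     static int
 *     on_link_change(uint16_t port_id, enum rte_eth_event_type event,
 *                    void *cb_arg, void *ret_param)
 *     {
 *             // react to the link state change here
 *             return 0;
 *     }
 *
 *     rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *                                   on_link_change, NULL);
 */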
4418
4419 void
4420 rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
4421 {
4422         if (dev == NULL)
4423                 return;
4424
4425         rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);
4426
4427         dev->state = RTE_ETH_DEV_ATTACHED;
4428 }
4429
4430 int
4431 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
4432 {
4433         uint32_t vec;
4434         struct rte_eth_dev *dev;
4435         struct rte_intr_handle *intr_handle;
4436         uint16_t qid;
4437         int rc;
4438
4439         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4440
4441         dev = &rte_eth_devices[port_id];
4442
4443         if (!dev->intr_handle) {
4444                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4445                 return -ENOTSUP;
4446         }
4447
4448         intr_handle = dev->intr_handle;
4449         if (!intr_handle->intr_vec) {
4450                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4451                 return -EPERM;
4452         }
4453
4454         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
4455                 vec = intr_handle->intr_vec[qid];
4456                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4457                 if (rc && rc != -EEXIST) {
4458                         RTE_ETHDEV_LOG(ERR,
4459                                 "p %u q %u rx ctl error op %d epfd %d vec %u\n",
4460                                 port_id, qid, op, epfd, vec);
4461                 }
4462         }
4463
4464         return 0;
4465 }
4466
4467 int
4468 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
4469 {
4470         struct rte_intr_handle *intr_handle;
4471         struct rte_eth_dev *dev;
4472         unsigned int efd_idx;
4473         uint32_t vec;
4474         int fd;
4475
4476         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
4477
4478         dev = &rte_eth_devices[port_id];
4479
4480         if (queue_id >= dev->data->nb_rx_queues) {
4481                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4482                 return -1;
4483         }
4484
4485         if (!dev->intr_handle) {
4486                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4487                 return -1;
4488         }
4489
4490         intr_handle = dev->intr_handle;
4491         if (!intr_handle->intr_vec) {
4492                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4493                 return -1;
4494         }
4495
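        /*
         * Rx queue interrupt vectors are allocated starting at
         * RTE_INTR_VEC_RXTX_OFFSET; strip that offset to recover the
         * index into the efds[] array.
         */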
4496         vec = intr_handle->intr_vec[queue_id];
4497         efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
4498                 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
4499         fd = intr_handle->efds[efd_idx];
4500
4501         return fd;
4502 }
4503
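/*
 * Build the canonical memzone name for a queue ring. Returns the snprintf()
 * result so that callers can detect truncation.
 */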
4504 static inline int
4505 eth_dma_mzone_name(char *name, size_t len, uint16_t port_id, uint16_t queue_id,
4506                 const char *ring_name)
4507 {
4508         return snprintf(name, len, "eth_p%d_q%d_%s",
4509                         port_id, queue_id, ring_name);
4510 }
4511
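/*
 * Note: if a memzone with the requested name already exists it is reused,
 * but only when it also satisfies the requested size, socket and alignment;
 * otherwise NULL is returned.
 */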
4512 const struct rte_memzone *
4513 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
4514                          uint16_t queue_id, size_t size, unsigned align,
4515                          int socket_id)
4516 {
4517         char z_name[RTE_MEMZONE_NAMESIZE];
4518         const struct rte_memzone *mz;
4519         int rc;
4520
4521         rc = eth_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
4522                         queue_id, ring_name);
4523         if (rc >= RTE_MEMZONE_NAMESIZE) {
4524                 RTE_ETHDEV_LOG(ERR, "ring name too long\n");
4525                 rte_errno = ENAMETOOLONG;
4526                 return NULL;
4527         }
4528
4529         mz = rte_memzone_lookup(z_name);
4530         if (mz) {
4531                 if ((socket_id != SOCKET_ID_ANY && socket_id != mz->socket_id) ||
4532                                 size > mz->len ||
4533                                 ((uintptr_t)mz->addr & (align - 1)) != 0) {
4534                         RTE_ETHDEV_LOG(ERR,
4535                                 "memzone %s does not justify the requested attributes\n",
4536                                 mz->name);
4537                         return NULL;
4538                 }
4539
4540                 return mz;
4541         }
4542
4543         return rte_memzone_reserve_aligned(z_name, size, socket_id,
4544                         RTE_MEMZONE_IOVA_CONTIG, align);
4545 }
4546
4547 int
4548 rte_eth_dma_zone_free(const struct rte_eth_dev *dev, const char *ring_name,
4549                 uint16_t queue_id)
4550 {
4551         char z_name[RTE_MEMZONE_NAMESIZE];
4552         const struct rte_memzone *mz;
4553         int rc = 0;
4554
4555         rc = eth_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
4556                         queue_id, ring_name);
4557         if (rc >= RTE_MEMZONE_NAMESIZE) {
4558                 RTE_ETHDEV_LOG(ERR, "ring name too long\n");
4559                 return -ENAMETOOLONG;
4560         }
4561
4562         mz = rte_memzone_lookup(z_name);
4563         if (mz)
4564                 rc = rte_memzone_free(mz);
4565         else
4566                 rc = -ENOENT;
4567
4568         return rc;
4569 }
4570
4571 int
4572 rte_eth_dev_create(struct rte_device *device, const char *name,
4573         size_t priv_data_size,
4574         ethdev_bus_specific_init ethdev_bus_specific_init,
4575         void *bus_init_params,
4576         ethdev_init_t ethdev_init, void *init_params)
4577 {
4578         struct rte_eth_dev *ethdev;
4579         int retval;
4580
4581         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL);
4582
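        /*
         * The primary process allocates the ethdev and its private data;
         * secondary processes attach to the port already created by the
         * primary.
         */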
4583         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
4584                 ethdev = rte_eth_dev_allocate(name);
4585                 if (!ethdev)
4586                         return -ENODEV;
4587
4588                 if (priv_data_size) {
4589                         ethdev->data->dev_private = rte_zmalloc_socket(
4590                                 name, priv_data_size, RTE_CACHE_LINE_SIZE,
4591                                 device->numa_node);
4592
4593                         if (!ethdev->data->dev_private) {
4594                                 RTE_ETHDEV_LOG(ERR,
4595                                         "failed to allocate private data\n");
4596                                 retval = -ENOMEM;
4597                                 goto probe_failed;
4598                         }
4599                 }
4600         } else {
4601                 ethdev = rte_eth_dev_attach_secondary(name);
4602                 if (!ethdev) {
4603                         RTE_ETHDEV_LOG(ERR,
4604                                 "secondary process attach failed, ethdev doesn't exist\n");
4605                                 return -ENODEV;
4606                 }
4607         }
4608
4609         ethdev->device = device;
4610
4611         if (ethdev_bus_specific_init) {
4612                 retval = ethdev_bus_specific_init(ethdev, bus_init_params);
4613                 if (retval) {
4614                         RTE_ETHDEV_LOG(ERR,
4615                                 "ethdev bus specific initialisation failed\n");
4616                         goto probe_failed;
4617                 }
4618         }
4619
4620         retval = ethdev_init(ethdev, init_params);
4621         if (retval) {
4622                 RTE_ETHDEV_LOG(ERR, "ethdev initialisation failed\n");
4623                 goto probe_failed;
4624         }
4625
4626         rte_eth_dev_probing_finish(ethdev);
4627
4628         return retval;
4629
4630 probe_failed:
4631         rte_eth_dev_release_port(ethdev);
4632         return retval;
4633 }
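/*
 * Usage sketch (illustrative; my_pmd_probe, my_pmd_private and my_pmd_init
 * are hypothetical names): a PMD probe function typically wraps
 * rte_eth_dev_create(), passing its device-init callback:
 *
 *     static int
 *     my_pmd_probe(struct rte_vdev_device *vdev)
 *     {
 *             return rte_eth_dev_create(&vdev->device,
 *                             rte_vdev_device_name(vdev),
 *                             sizeof(struct my_pmd_private),
 *                             NULL, NULL, my_pmd_init, NULL);
 *     }
 */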
4634
4635 int
4636 rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
4637         ethdev_uninit_t ethdev_uninit)
4638 {
4639         int ret;
4640
4641         ethdev = rte_eth_dev_allocated(ethdev->data->name);
4642         if (!ethdev)
4643                 return -ENODEV;
4644
4645         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL);
4646
4647         ret = ethdev_uninit(ethdev);
4648         if (ret)
4649                 return ret;
4650
4651         return rte_eth_dev_release_port(ethdev);
4652 }
4653
4654 int
4655 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
4656                           int epfd, int op, void *data)
4657 {
4658         uint32_t vec;
4659         struct rte_eth_dev *dev;
4660         struct rte_intr_handle *intr_handle;
4661         int rc;
4662
4663         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4664
4665         dev = &rte_eth_devices[port_id];
4666         if (queue_id >= dev->data->nb_rx_queues) {
4667                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4668                 return -EINVAL;
4669         }
4670
4671         if (!dev->intr_handle) {
4672                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4673                 return -ENOTSUP;
4674         }
4675
4676         intr_handle = dev->intr_handle;
4677         if (!intr_handle->intr_vec) {
4678                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4679                 return -EPERM;
4680         }
4681
4682         vec = intr_handle->intr_vec[queue_id];
4683         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4684         if (rc && rc != -EEXIST) {
4685                 RTE_ETHDEV_LOG(ERR,
4686                         "p %u q %u rx ctl error op %d epfd %d vec %u\n",
4687                         port_id, queue_id, op, epfd, vec);
4688                 return rc;
4689         }
4690
4691         return 0;
4692 }
4693
4694 int
4695 rte_eth_dev_rx_intr_enable(uint16_t port_id,
4696                            uint16_t queue_id)
4697 {
4698         struct rte_eth_dev *dev;
4699         int ret;
4700
4701         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4702
4703         dev = &rte_eth_devices[port_id];
4704
4705         ret = eth_dev_validate_rx_queue(dev, queue_id);
4706         if (ret != 0)
4707                 return ret;
4708
4709         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
4710         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev,
4711                                                                 queue_id));
4712 }
4713
4714 int
4715 rte_eth_dev_rx_intr_disable(uint16_t port_id,
4716                             uint16_t queue_id)
4717 {
4718         struct rte_eth_dev *dev;
4719         int ret;
4720
4721         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4722
4723         dev = &rte_eth_devices[port_id];
4724
4725         ret = eth_dev_validate_rx_queue(dev, queue_id);
4726         if (ret != 0)
4727                 return ret;
4728
4729         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
4730         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev,
4731                                                                 queue_id));
4732 }
4733
4734
4735 int
4736 rte_eth_dev_filter_supported(uint16_t port_id,
4737                              enum rte_filter_type filter_type)
4738 {
4739         struct rte_eth_dev *dev;
4740
4741         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4742
4743         dev = &rte_eth_devices[port_id];
4744         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
4745         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
4746                                 RTE_ETH_FILTER_NOP, NULL);
4747 }
4748
4749 int
4750 rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
4751                         enum rte_filter_op filter_op, void *arg)
4752 {
4753         struct rte_eth_dev *dev;
4754
4755         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4756
4757         dev = &rte_eth_devices[port_id];
4758         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
4759         return eth_err(port_id, (*dev->dev_ops->filter_ctrl)(dev, filter_type,
4760                                                              filter_op, arg));
4761 }
4762
4763 const struct rte_eth_rxtx_callback *
4764 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
4765                 rte_rx_callback_fn fn, void *user_param)
4766 {
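        /*
         * If RTE_ETHDEV_RXTX_CALLBACKS was disabled at build time, the
         * Rx/Tx callback API compiles down to this early ENOTSUP return
         * (the same guard appears in the other callback functions below).
         */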
4767 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4768         rte_errno = ENOTSUP;
4769         return NULL;
4770 #endif
4771         struct rte_eth_dev *dev;
4772
4773         /* check input parameters */
4774         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4775                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4776                 rte_errno = EINVAL;
4777                 return NULL;
4778         }
4779         dev = &rte_eth_devices[port_id];
4780         if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
4781                 rte_errno = EINVAL;
4782                 return NULL;
4783         }
4784         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4785
4786         if (cb == NULL) {
4787                 rte_errno = ENOMEM;
4788                 return NULL;
4789         }
4790
4791         cb->fn.rx = fn;
4792         cb->param = user_param;
4793
4794         rte_spinlock_lock(&rte_eth_rx_cb_lock);
4795         /* Add the callbacks in fifo order. */
4796         struct rte_eth_rxtx_callback *tail =
4797                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
4798
4799         if (!tail) {
4800                 /* Stores to cb->fn and cb->param should complete before
4801                  * cb is visible to data plane.
4802                  */
4803                 __atomic_store_n(
4804                         &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
4805                         cb, __ATOMIC_RELEASE);
4806
4807         } else {
4808                 while (tail->next)
4809                         tail = tail->next;
4810                 /* Stores to cb->fn and cb->param should complete before
4811                  * cb is visible to data plane.
4812                  */
4813                 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
4814         }
4815         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
4816
4817         return cb;
4818 }
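/*
 * Usage sketch (illustrative): count the packets received on queue 0.
 *
 *     static uint16_t
 *     count_cb(uint16_t port, uint16_t queue, struct rte_mbuf *pkts[],
 *              uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
 *     {
 *             *(uint64_t *)user_param += nb_pkts;
 *             return nb_pkts;
 *     }
 *
 *     static uint64_t total;
 *     const struct rte_eth_rxtx_callback *cb =
 *             rte_eth_add_rx_callback(port_id, 0, count_cb, &total);
 */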
4819
4820 const struct rte_eth_rxtx_callback *
4821 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
4822                 rte_rx_callback_fn fn, void *user_param)
4823 {
4824 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4825         rte_errno = ENOTSUP;
4826         return NULL;
4827 #endif
4828         /* check input parameters */
4829         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4830                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4831                 rte_errno = EINVAL;
4832                 return NULL;
4833         }
4834
4835         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4836
4837         if (cb == NULL) {
4838                 rte_errno = ENOMEM;
4839                 return NULL;
4840         }
4841
4842         cb->fn.rx = fn;
4843         cb->param = user_param;
4844
4845         rte_spinlock_lock(&rte_eth_rx_cb_lock);
4846         /* Add the callbacks at first position */
4847         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
4848         /* Stores to cb->fn, cb->param and cb->next should complete before
4849          * cb is visible to data plane threads.
4850          */
4851         __atomic_store_n(
4852                 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
4853                 cb, __ATOMIC_RELEASE);
4854         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
4855
4856         return cb;
4857 }
4858
4859 const struct rte_eth_rxtx_callback *
4860 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
4861                 rte_tx_callback_fn fn, void *user_param)
4862 {
4863 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4864         rte_errno = ENOTSUP;
4865         return NULL;
4866 #endif
4867         struct rte_eth_dev *dev;
4868
4869         /* check input parameters */
4870         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4871                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
4872                 rte_errno = EINVAL;
4873                 return NULL;
4874         }
4875
4876         dev = &rte_eth_devices[port_id];
4877         if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
4878                 rte_errno = EINVAL;
4879                 return NULL;
4880         }
4881
4882         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4883
4884         if (cb == NULL) {
4885                 rte_errno = ENOMEM;
4886                 return NULL;
4887         }
4888
4889         cb->fn.tx = fn;
4890         cb->param = user_param;
4891
4892         rte_spinlock_lock(&rte_eth_tx_cb_lock);
4893         /* Add the callbacks in fifo order. */
4894         struct rte_eth_rxtx_callback *tail =
4895                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
4896
4897         if (!tail) {
4898                 /* Stores to cb->fn and cb->param should complete before
4899                  * cb is visible to data plane.
4900                  */
4901                 __atomic_store_n(
4902                         &rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id],
4903                         cb, __ATOMIC_RELEASE);
4904
4905         } else {
4906                 while (tail->next)
4907                         tail = tail->next;
4908                 /* Stores to cb->fn and cb->param should complete before
4909                  * cb is visible to data plane.
4910                  */
4911                 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
4912         }
4913         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
4914
4915         return cb;
4916 }
4917
4918 int
4919 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
4920                 const struct rte_eth_rxtx_callback *user_cb)
4921 {
4922 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4923         return -ENOTSUP;
4924 #endif
4925         /* Check input parameters. */
4926         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
4927         if (user_cb == NULL ||
4928                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
4929                 return -EINVAL;
4930
4931         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4932         struct rte_eth_rxtx_callback *cb;
4933         struct rte_eth_rxtx_callback **prev_cb;
4934         int ret = -EINVAL;
4935
4936         rte_spinlock_lock(&rte_eth_rx_cb_lock);
4937         prev_cb = &dev->post_rx_burst_cbs[queue_id];
4938         for (; *prev_cb != NULL; prev_cb = &cb->next) {
4939                 cb = *prev_cb;
4940                 if (cb == user_cb) {
4941                         /* Remove the user cb from the callback list. */
4942                         __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
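                        /*
                         * The callback memory is deliberately not freed
                         * here: a data plane thread may still be executing
                         * it. The caller must only free it once no thread
                         * can be using it.
                         */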
4943                         ret = 0;
4944                         break;
4945                 }
4946         }
4947         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
4948
4949         return ret;
4950 }
4951
4952 int
4953 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
4954                 const struct rte_eth_rxtx_callback *user_cb)
4955 {
4956 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4957         return -ENOTSUP;
4958 #endif
4959         /* Check input parameters. */
4960         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
4961         if (user_cb == NULL ||
4962                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
4963                 return -EINVAL;
4964
4965         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4966         int ret = -EINVAL;
4967         struct rte_eth_rxtx_callback *cb;
4968         struct rte_eth_rxtx_callback **prev_cb;
4969
4970         rte_spinlock_lock(&rte_eth_tx_cb_lock);
4971         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
4972         for (; *prev_cb != NULL; prev_cb = &cb->next) {
4973                 cb = *prev_cb;
4974                 if (cb == user_cb) {
4975                         /* Remove the user cb from the callback list. */
4976                         __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
4977                         ret = 0;
4978                         break;
4979                 }
4980         }
4981         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
4982
4983         return ret;
4984 }
4985
4986 int
4987 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4988         struct rte_eth_rxq_info *qinfo)
4989 {
4990         struct rte_eth_dev *dev;
4991
4992         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4993
4994         if (qinfo == NULL)
4995                 return -EINVAL;
4996
4997         dev = &rte_eth_devices[port_id];
4998         if (queue_id >= dev->data->nb_rx_queues) {
4999                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
5000                 return -EINVAL;
5001         }
5002
5003         if (dev->data->rx_queues == NULL ||
5004                         dev->data->rx_queues[queue_id] == NULL) {
5005                 RTE_ETHDEV_LOG(ERR,
5006                                "Rx queue %"PRIu16" of device with port_id=%"
5007                                PRIu16" has not been set up\n",
5008                                queue_id, port_id);
5009                 return -EINVAL;
5010         }
5011
5012         if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
5013                 RTE_ETHDEV_LOG(INFO,
5014                         "Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
5015                         queue_id, port_id);
5016                 return -EINVAL;
5017         }
5018
5019         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
5020
5021         memset(qinfo, 0, sizeof(*qinfo));
5022         (*dev->dev_ops->rxq_info_get)(dev, queue_id, qinfo);
5023         return 0;
5024 }
5025
5026 int
5027 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5028         struct rte_eth_txq_info *qinfo)
5029 {
5030         struct rte_eth_dev *dev;
5031
5032         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5033
5034         if (qinfo == NULL)
5035                 return -EINVAL;
5036
5037         dev = &rte_eth_devices[port_id];
5038         if (queue_id >= dev->data->nb_tx_queues) {
5039                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
5040                 return -EINVAL;
5041         }
5042
5043         if (dev->data->tx_queues == NULL ||
5044                         dev->data->tx_queues[queue_id] == NULL) {
5045                 RTE_ETHDEV_LOG(ERR,
5046                                "Tx queue %"PRIu16" of device with port_id=%"
5047                                PRIu16" has not been set up\n",
5048                                queue_id, port_id);
5049                 return -EINVAL;
5050         }
5051
5052         if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
5053                 RTE_ETHDEV_LOG(INFO,
5054                         "Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
5055                         queue_id, port_id);
5056                 return -EINVAL;
5057         }
5058
5059         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
5060
5061         memset(qinfo, 0, sizeof(*qinfo));
5062         (*dev->dev_ops->txq_info_get)(dev, queue_id, qinfo);
5063
5064         return 0;
5065 }
5066
5067 int
5068 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5069                           struct rte_eth_burst_mode *mode)
5070 {
5071         struct rte_eth_dev *dev;
5072
5073         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5074
5075         if (mode == NULL)
5076                 return -EINVAL;
5077
5078         dev = &rte_eth_devices[port_id];
5079
5080         if (queue_id >= dev->data->nb_rx_queues) {
5081                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
5082                 return -EINVAL;
5083         }
5084
5085         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP);
5086         memset(mode, 0, sizeof(*mode));
5087         return eth_err(port_id,
5088                        dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode));
5089 }
5090
5091 int
5092 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5093                           struct rte_eth_burst_mode *mode)
5094 {
5095         struct rte_eth_dev *dev;
5096
5097         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5098
5099         if (mode == NULL)
5100                 return -EINVAL;
5101
5102         dev = &rte_eth_devices[port_id];
5103
5104         if (queue_id >= dev->data->nb_tx_queues) {
5105                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
5106                 return -EINVAL;
5107         }
5108
5109         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP);
5110         memset(mode, 0, sizeof(*mode));
5111         return eth_err(port_id,
5112                        dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode));
5113 }
5114
5115 int
5116 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
5117                              struct rte_ether_addr *mc_addr_set,
5118                              uint32_t nb_mc_addr)
5119 {
5120         struct rte_eth_dev *dev;
5121
5122         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5123
5124         dev = &rte_eth_devices[port_id];
5125         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
5126         return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
5127                                                 mc_addr_set, nb_mc_addr));
5128 }
5129
5130 int
5131 rte_eth_timesync_enable(uint16_t port_id)
5132 {
5133         struct rte_eth_dev *dev;
5134
5135         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5136         dev = &rte_eth_devices[port_id];
5137
5138         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
5139         return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
5140 }
5141
5142 int
5143 rte_eth_timesync_disable(uint16_t port_id)
5144 {
5145         struct rte_eth_dev *dev;
5146
5147         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5148         dev = &rte_eth_devices[port_id];
5149
5150         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
5151         return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
5152 }
5153
5154 int
5155 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
5156                                    uint32_t flags)
5157 {
5158         struct rte_eth_dev *dev;
5159
5160         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5161         dev = &rte_eth_devices[port_id];
5162
5163         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
5164         return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
5165                                 (dev, timestamp, flags));
5166 }
5167
5168 int
5169 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
5170                                    struct timespec *timestamp)
5171 {
5172         struct rte_eth_dev *dev;
5173
5174         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5175         dev = &rte_eth_devices[port_id];
5176
5177         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
5178         return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
5179                                 (dev, timestamp));
5180 }
5181
5182 int
5183 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
5184 {
5185         struct rte_eth_dev *dev;
5186
5187         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5188         dev = &rte_eth_devices[port_id];
5189
5190         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
5191         return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev,
5192                                                                       delta));
5193 }
5194
5195 int
5196 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
5197 {
5198         struct rte_eth_dev *dev;
5199
5200         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5201         dev = &rte_eth_devices[port_id];
5202
5203         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
5204         return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
5205                                                                 timestamp));
5206 }
5207
5208 int
5209 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
5210 {
5211         struct rte_eth_dev *dev;
5212
5213         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5214         dev = &rte_eth_devices[port_id];
5215
5216         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
5217         return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
5218                                                                 timestamp));
5219 }
5220
5221 int
5222 rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
5223 {
5224         struct rte_eth_dev *dev;
5225
5226         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5227         dev = &rte_eth_devices[port_id];
5228
5229         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP);
5230         return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock));
5231 }
5232
5233 int
5234 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
5235 {
5236         struct rte_eth_dev *dev;
5237
5238         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5239
5240         dev = &rte_eth_devices[port_id];
5241         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
5242         return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
5243 }
5244
5245 int
5246 rte_eth_dev_get_eeprom_length(uint16_t port_id)
5247 {
5248         struct rte_eth_dev *dev;
5249
5250         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5251
5252         dev = &rte_eth_devices[port_id];
5253         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
5254         return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
5255 }
5256
5257 int
5258 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
5259 {
5260         struct rte_eth_dev *dev;
5261
5262         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5263
5264         dev = &rte_eth_devices[port_id];
5265         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
5266         return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
5267 }
5268
5269 int
5270 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
5271 {
5272         struct rte_eth_dev *dev;
5273
5274         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5275
5276         dev = &rte_eth_devices[port_id];
5277         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
5278         return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
5279 }
5280
5281 int
5282 rte_eth_dev_get_module_info(uint16_t port_id,
5283                             struct rte_eth_dev_module_info *modinfo)
5284 {
5285         struct rte_eth_dev *dev;
5286
5287         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5288
5289         dev = &rte_eth_devices[port_id];
5290         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP);
5291         return (*dev->dev_ops->get_module_info)(dev, modinfo);
5292 }
5293
5294 int
5295 rte_eth_dev_get_module_eeprom(uint16_t port_id,
5296                               struct rte_dev_eeprom_info *info)
5297 {
5298         struct rte_eth_dev *dev;
5299
5300         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5301
5302         dev = &rte_eth_devices[port_id];
5303         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP);
5304         return (*dev->dev_ops->get_module_eeprom)(dev, info);
5305 }
5306
5307 int
5308 rte_eth_dev_get_dcb_info(uint16_t port_id,
5309                              struct rte_eth_dcb_info *dcb_info)
5310 {
5311         struct rte_eth_dev *dev;
5312
5313         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5314
5315         dev = &rte_eth_devices[port_id];
5316         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
5317
5318         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
5319         return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
5320 }
5321
5322 int
5323 rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
5324                                     struct rte_eth_l2_tunnel_conf *l2_tunnel)
5325 {
5326         struct rte_eth_dev *dev;
5327
5328         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5329         if (l2_tunnel == NULL) {
5330                 RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n");
5331                 return -EINVAL;
5332         }
5333
5334         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
5335                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
5336                 return -EINVAL;
5337         }
5338
5339         dev = &rte_eth_devices[port_id];
5340         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
5341                                 -ENOTSUP);
5342         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev,
5343                                                                 l2_tunnel));
5344 }
5345
5346 int
5347 rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
5348                                   struct rte_eth_l2_tunnel_conf *l2_tunnel,
5349                                   uint32_t mask,
5350                                   uint8_t en)
5351 {
5352         struct rte_eth_dev *dev;
5353
5354         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5355
5356         if (l2_tunnel == NULL) {
5357                 RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n");
5358                 return -EINVAL;
5359         }
5360
5361         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
5362                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
5363                 return -EINVAL;
5364         }
5365
5366         if (mask == 0) {
5367                 RTE_ETHDEV_LOG(ERR, "Mask must be non-zero\n");
5368                 return -EINVAL;
5369         }
5370
5371         dev = &rte_eth_devices[port_id];
5372         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
5373                                 -ENOTSUP);
5374         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_offload_set)(dev,
5375                                                         l2_tunnel, mask, en));
5376 }
5377
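/*
 * Clamp a descriptor count to the limits reported by the driver: align it
 * up to nb_align, cap it at nb_max, then raise it to at least nb_min.
 */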
5378 static void
5379 rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc,
5380                            const struct rte_eth_desc_lim *desc_lim)
5381 {
5382         if (desc_lim->nb_align != 0)
5383                 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
5384
5385         if (desc_lim->nb_max != 0)
5386                 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
5387
5388         *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
5389 }
5390
5391 int
5392 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
5393                                  uint16_t *nb_rx_desc,
5394                                  uint16_t *nb_tx_desc)
5395 {
5396         struct rte_eth_dev_info dev_info;
5397         int ret;
5398
5399         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5400
5401         ret = rte_eth_dev_info_get(port_id, &dev_info);
5402         if (ret != 0)
5403                 return ret;
5404
5405         if (nb_rx_desc != NULL)
5406                 rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
5407
5408         if (nb_tx_desc != NULL)
5409                 rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
5410
5411         return 0;
5412 }
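/*
 * Usage sketch (illustrative): adjust the requested ring sizes before queue
 * setup so that they respect the device limits, assuming 'mp' is an
 * already-created mbuf pool:
 *
 *     uint16_t nb_rxd = 1024, nb_txd = 1024;
 *     rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
 *     rte_eth_rx_queue_setup(port_id, 0, nb_rxd,
 *                     rte_eth_dev_socket_id(port_id), NULL, mp);
 *     rte_eth_tx_queue_setup(port_id, 0, nb_txd,
 *                     rte_eth_dev_socket_id(port_id), NULL);
 */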
5413
5414 int
5415 rte_eth_dev_hairpin_capability_get(uint16_t port_id,
5416                                    struct rte_eth_hairpin_cap *cap)
5417 {
5418         struct rte_eth_dev *dev;
5419
5420         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
5421
5422         dev = &rte_eth_devices[port_id];
5423         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP);
5424         memset(cap, 0, sizeof(*cap));
5425         return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap));
5426 }
5427
5428 int
5429 rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
5430 {
5431         if (dev->data->rx_queue_state[queue_id] ==
5432             RTE_ETH_QUEUE_STATE_HAIRPIN)
5433                 return 1;
5434         return 0;
5435 }
5436
5437 int
5438 rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
5439 {
5440         if (dev->data->tx_queue_state[queue_id] ==
5441             RTE_ETH_QUEUE_STATE_HAIRPIN)
5442                 return 1;
5443         return 0;
5444 }
5445
5446 int
5447 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
5448 {
5449         struct rte_eth_dev *dev;
5450
5451         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5452
5453         if (pool == NULL)
5454                 return -EINVAL;
5455
5456         dev = &rte_eth_devices[port_id];
5457
5458         if (*dev->dev_ops->pool_ops_supported == NULL)
5459                 return 1; /* all pools are supported */
5460
5461         return (*dev->dev_ops->pool_ops_supported)(dev, pool);
5462 }
5463
5464 /**
5465  * A set of values to describe the possible states of a switch domain.
5466  */
5467 enum rte_eth_switch_domain_state {
5468         RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
5469         RTE_ETH_SWITCH_DOMAIN_ALLOCATED
5470 };
5471
5472 /**
5473  * Array of switch domains available for allocation. Array is sized to
5474  * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
5475  * ethdev ports in a single process.
5476  */
5477 static struct rte_eth_dev_switch {
5478         enum rte_eth_switch_domain_state state;
5479 } rte_eth_switch_domains[RTE_MAX_ETHPORTS];
5480
5481 int
5482 rte_eth_switch_domain_alloc(uint16_t *domain_id)
5483 {
5484         unsigned int i;
5485
5486         *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
5487
5488         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
5489                 if (rte_eth_switch_domains[i].state ==
5490                         RTE_ETH_SWITCH_DOMAIN_UNUSED) {
5491                         rte_eth_switch_domains[i].state =
5492                                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
5493                         *domain_id = i;
5494                         return 0;
5495                 }
5496         }
5497
5498         return -ENOSPC;
5499 }
5500
5501 int
5502 rte_eth_switch_domain_free(uint16_t domain_id)
5503 {
5504         if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
5505                 domain_id >= RTE_MAX_ETHPORTS)
5506                 return -EINVAL;
5507
5508         if (rte_eth_switch_domains[domain_id].state !=
5509                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
5510                 return -EINVAL;
5511
5512         rte_eth_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;
5513
5514         return 0;
5515 }
5516
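/*
 * Split a devargs string of the form "key=value,key=[v1,v2],..." into
 * key/value pairs. The state machine below tracks whether parsing is at
 * the start of a pair (0), inside a key (1), inside a value (2) or inside
 * a bracketed list value (3).
 */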
5517 static int
5518 rte_eth_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
5519 {
5520         int state;
5521         struct rte_kvargs_pair *pair;
5522         char *letter;
5523
5524         arglist->str = strdup(str_in);
5525         if (arglist->str == NULL)
5526                 return -ENOMEM;
5527
5528         letter = arglist->str;
5529         state = 0;
5530         arglist->count = 0;
5531         pair = &arglist->pairs[0];
5532         while (1) {
5533                 switch (state) {
5534                 case 0: /* Initial */
5535                         if (*letter == '=')
5536                                 return -EINVAL;
5537                         else if (*letter == '\0')
5538                                 return 0;
5539
5540                         state = 1;
5541                         pair->key = letter;
5542                         /* fall-thru */
5543
5544                 case 1: /* Parsing key */
5545                         if (*letter == '=') {
5546                                 *letter = '\0';
5547                                 pair->value = letter + 1;
5548                                 state = 2;
5549                         } else if (*letter == ',' || *letter == '\0')
5550                                 return -EINVAL;
5551                         break;
5552
5554                 case 2: /* Parsing value */
5555                         if (*letter == '[')
5556                                 state = 3;
5557                         else if (*letter == ',') {
5558                                 *letter = '\0';
5559                                 arglist->count++;
5560                                 pair = &arglist->pairs[arglist->count];
5561                                 state = 0;
5562                         } else if (*letter == '\0') {
5563                                 letter--;
5564                                 arglist->count++;
5565                                 pair = &arglist->pairs[arglist->count];
5566                                 state = 0;
5567                         }
5568                         break;
5569
5570                 case 3: /* Parsing list */
5571                         if (*letter == ']')
5572                                 state = 2;
5573                         else if (*letter == '\0')
5574                                 return -EINVAL;
5575                         break;
5576                 }
5577                 letter++;
5578         }
5579 }
5580
5581 int
5582 rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
5583 {
5584         struct rte_kvargs args;
5585         struct rte_kvargs_pair *pair;
5586         unsigned int i;
5587         int result = 0;
5588
5589         memset(eth_da, 0, sizeof(*eth_da));
5590
5591         result = rte_eth_devargs_tokenise(&args, dargs);
5592         if (result < 0)
5593                 goto parse_cleanup;
5594
5595         for (i = 0; i < args.count; i++) {
5596                 pair = &args.pairs[i];
5597                 if (strcmp("representor", pair->key) == 0) {
5598                         result = rte_eth_devargs_parse_list(pair->value,
5599                                 rte_eth_devargs_parse_representor_ports,
5600                                 eth_da);
5601                         if (result < 0)
5602                                 goto parse_cleanup;
5603                 }
5604         }
5605
5606 parse_cleanup:
5607         free(args.str);
5609
5610         return result;
5611 }
5612
5613 static int
5614 handle_port_list(const char *cmd __rte_unused,
5615                 const char *params __rte_unused,
5616                 struct rte_tel_data *d)
5617 {
5618         int port_id;
5619
5620         rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
5621         RTE_ETH_FOREACH_DEV(port_id)
5622                 rte_tel_data_add_array_int(d, port_id);
5623         return 0;
5624 }
5625
5626 static void
5627 add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats,
5628                 const char *stat_name)
5629 {
5630         int q;
5631         struct rte_tel_data *q_data = rte_tel_data_alloc();
5632         rte_tel_data_start_array(q_data, RTE_TEL_U64_VAL);
5633         for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++)
5634                 rte_tel_data_add_array_u64(q_data, q_stats[q]);
5635         rte_tel_data_add_dict_container(d, stat_name, q_data, 0);
5636 }
5637
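/* Add one basic-stats field to the telemetry dict 'd' (taken from the
 * calling scope), keyed by the field name.
 */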
5638 #define ADD_DICT_STAT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s)
5639
5640 static int
5641 handle_port_stats(const char *cmd __rte_unused,
5642                 const char *params,
5643                 struct rte_tel_data *d)
5644 {
5645         struct rte_eth_stats stats;
5646         int port_id, ret;
5647
5648         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5649                 return -1;
5650
5651         port_id = atoi(params);
5652         if (!rte_eth_dev_is_valid_port(port_id))
5653                 return -1;
5654
5655         ret = rte_eth_stats_get(port_id, &stats);
5656         if (ret < 0)
5657                 return -1;
5658
5659         rte_tel_data_start_dict(d);
5660         ADD_DICT_STAT(stats, ipackets);
5661         ADD_DICT_STAT(stats, opackets);
5662         ADD_DICT_STAT(stats, ibytes);
5663         ADD_DICT_STAT(stats, obytes);
5664         ADD_DICT_STAT(stats, imissed);
5665         ADD_DICT_STAT(stats, ierrors);
5666         ADD_DICT_STAT(stats, oerrors);
5667         ADD_DICT_STAT(stats, rx_nombuf);
5668         add_port_queue_stats(d, stats.q_ipackets, "q_ipackets");
5669         add_port_queue_stats(d, stats.q_opackets, "q_opackets");
5670         add_port_queue_stats(d, stats.q_ibytes, "q_ibytes");
5671         add_port_queue_stats(d, stats.q_obytes, "q_obytes");
5672         add_port_queue_stats(d, stats.q_errors, "q_errors");
5673
5674         return 0;
5675 }
5676
5677 static int
5678 handle_port_xstats(const char *cmd __rte_unused,
5679                 const char *params,
5680                 struct rte_tel_data *d)
5681 {
5682         struct rte_eth_xstat *eth_xstats;
5683         struct rte_eth_xstat_name *xstat_names;
5684         int port_id, num_xstats;
5685         int i, ret;
5686         char *end_param;
5687
5688         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5689                 return -1;
5690
5691         port_id = strtoul(params, &end_param, 0);
5692         if (*end_param != '\0')
5693                 RTE_ETHDEV_LOG(NOTICE,
5694                         "Extra parameters passed to ethdev telemetry command, ignoring");
5695         if (!rte_eth_dev_is_valid_port(port_id))
5696                 return -1;
5697
5698         num_xstats = rte_eth_xstats_get(port_id, NULL, 0);
5699         if (num_xstats < 0)
5700                 return -1;
5701
5702         /* use one malloc for both names and stats */
5703         eth_xstats = malloc((sizeof(struct rte_eth_xstat) +
5704                         sizeof(struct rte_eth_xstat_name)) * num_xstats);
5705         if (eth_xstats == NULL)
5706                 return -1;
5707         xstat_names = (void *)&eth_xstats[num_xstats];
5708
5709         ret = rte_eth_xstats_get_names(port_id, xstat_names, num_xstats);
5710         if (ret < 0 || ret > num_xstats) {
5711                 free(eth_xstats);
5712                 return -1;
5713         }
5714
5715         ret = rte_eth_xstats_get(port_id, eth_xstats, num_xstats);
5716         if (ret < 0 || ret > num_xstats) {
5717                 free(eth_xstats);
5718                 return -1;
5719         }
5720
5721         rte_tel_data_start_dict(d);
5722         for (i = 0; i < num_xstats; i++)
5723                 rte_tel_data_add_dict_u64(d, xstat_names[i].name,
5724                                 eth_xstats[i].value);
5725         return 0;
5726 }
5727
5728 static int
5729 handle_port_link_status(const char *cmd __rte_unused,
5730                 const char *params,
5731                 struct rte_tel_data *d)
5732 {
5733         static const char *status_str = "status";
5734         int ret, port_id;
5735         struct rte_eth_link link;
5736         char *end_param;
5737
5738         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5739                 return -1;
5740
5741         port_id = strtoul(params, &end_param, 0);
5742         if (*end_param != '\0')
5743                 RTE_ETHDEV_LOG(NOTICE,
5744                         "Extra parameters passed to ethdev telemetry command, ignoring");
5745         if (!rte_eth_dev_is_valid_port(port_id))
5746                 return -1;
5747
5748         ret = rte_eth_link_get(port_id, &link);
5749         if (ret < 0)
5750                 return -1;
5751
5752         rte_tel_data_start_dict(d);
5753         if (!link.link_status) {
5754                 rte_tel_data_add_dict_string(d, status_str, "DOWN");
5755                 return 0;
5756         }
5757         rte_tel_data_add_dict_string(d, status_str, "UP");
5758         rte_tel_data_add_dict_u64(d, "speed", link.link_speed);
5759         rte_tel_data_add_dict_string(d, "duplex",
5760                         (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
5761                                 "full-duplex" : "half-duplex");
5762         return 0;
5763 }
5764
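/*
 * The hairpin peer helpers below are internal, driver-facing entry points
 * used while binding and unbinding hairpin queues between two ports; each
 * one simply forwards to the corresponding dev_ops callback.
 */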
5765 int
5766 rte_eth_hairpin_queue_peer_update(uint16_t peer_port, uint16_t peer_queue,
5767                                   struct rte_hairpin_peer_info *cur_info,
5768                                   struct rte_hairpin_peer_info *peer_info,
5769                                   uint32_t direction)
5770 {
5771         struct rte_eth_dev *dev;
5772
5773         /* Current queue info is optional, but peer info must be provided. */
5774         if (peer_info == NULL)
5775                 return -EINVAL;
5776
5777         /* No need to check the validity again. */
5778         dev = &rte_eth_devices[peer_port];
5779         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_update,
5780                                 -ENOTSUP);
5781
5782         return (*dev->dev_ops->hairpin_queue_peer_update)(dev, peer_queue,
5783                                         cur_info, peer_info, direction);
5784 }
5785
5786 int
5787 rte_eth_hairpin_queue_peer_bind(uint16_t cur_port, uint16_t cur_queue,
5788                                 struct rte_hairpin_peer_info *peer_info,
5789                                 uint32_t direction)
5790 {
5791         struct rte_eth_dev *dev;
5792
5793         if (peer_info == NULL)
5794                 return -EINVAL;
5795
5796         /* No need to check the validity again. */
5797         dev = &rte_eth_devices[cur_port];
5798         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_bind,
5799                                 -ENOTSUP);
5800
5801         return (*dev->dev_ops->hairpin_queue_peer_bind)(dev, cur_queue,
5802                                                         peer_info, direction);
5803 }
5804
5805 int
5806 rte_eth_hairpin_queue_peer_unbind(uint16_t cur_port, uint16_t cur_queue,
5807                                   uint32_t direction)
5808 {
5809         struct rte_eth_dev *dev;
5810
5811         /* No need to check the validity again. */
5812         dev = &rte_eth_devices[cur_port];
5813         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_unbind,
5814                                 -ENOTSUP);
5815
5816         return (*dev->dev_ops->hairpin_queue_peer_unbind)(dev, cur_queue,
5817                                                           direction);
5818 }
5819
5820 RTE_LOG_REGISTER(rte_eth_dev_logtype, lib.ethdev, INFO);
5821
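/*
 * The telemetry endpoints registered below can be queried at runtime over
 * the telemetry socket, e.g. with usertools/dpdk-telemetry.py:
 *     --> /ethdev/stats,0
 */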
5822 RTE_INIT(ethdev_init_telemetry)
5823 {
5824         rte_telemetry_register_cmd("/ethdev/list", handle_port_list,
5825                         "Returns list of available ethdev ports. Takes no parameters");
5826         rte_telemetry_register_cmd("/ethdev/stats", handle_port_stats,
5827                         "Returns the common stats for a port. Parameters: int port_id");
5828         rte_telemetry_register_cmd("/ethdev/xstats", handle_port_xstats,
5829                         "Returns the extended stats for a port. Parameters: int port_id");
5830         rte_telemetry_register_cmd("/ethdev/link_status",
5831                         handle_port_link_status,
5832                         "Returns the link status for a port. Parameters: int port_id");
5833 }