/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_kvargs.h>
#include <rte_class.h>
#include <rte_ether.h>
#ifdef RTE_LIBRTE_TELEMETRY
#include <rte_telemetry.h>
#endif

#include "rte_ethdev_trace.h"
#include "rte_ethdev.h"
#include "rte_ethdev_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"

int rte_eth_dev_logtype;

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove rx callbacks */
static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove tx callbacks */
static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for shared data allocation */
static rte_spinlock_t rte_eth_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* Store statistics names and their offsets in the stats structure. */
struct rte_eth_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

/* Shared memory between primary and secondary processes. */
static struct {
        uint64_t next_owner_id;
        rte_spinlock_t ownership_lock;
        struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
} *rte_eth_dev_shared_data;

static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
        {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
        {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
        {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
        {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
        {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
        {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
        {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
        {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
                rx_nombuf)},
};

#define RTE_NB_STATS RTE_DIM(rte_stats_strings)

static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
        {"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS RTE_DIM(rte_rxq_stats_strings)

static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_opackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS RTE_DIM(rte_txq_stats_strings)

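/*
 * Illustrative sketch (not part of the upstream file): the name/offset
 * tables above let generic code read any basic counter from a filled
 * struct rte_eth_stats by name, which is exactly the offsetof() trick
 * the xstats path relies on. "basic_stat_by_name" is a hypothetical
 * helper, not an ethdev API:
 *
 *     static uint64_t
 *     basic_stat_by_name(const struct rte_eth_stats *stats, const char *name)
 *     {
 *             unsigned int i;
 *
 *             for (i = 0; i < RTE_NB_STATS; i++)
 *                     if (strcmp(rte_stats_strings[i].name, name) == 0)
 *                             return *(const uint64_t *)
 *                                     ((const char *)stats +
 *                                      rte_stats_strings[i].offset);
 *             return 0;
 *     }
 */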
#define RTE_RX_OFFLOAD_BIT2STR(_name)   \
        { DEV_RX_OFFLOAD_##_name, #_name }

static const struct {
        uint64_t offload;
        const char *name;
} rte_rx_offload_names[] = {
        RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
        RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
        RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
        RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
        RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
        RTE_RX_OFFLOAD_BIT2STR(SCATTER),
        RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
        RTE_RX_OFFLOAD_BIT2STR(SECURITY),
        RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
        RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
};

#undef RTE_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name)   \
        { DEV_TX_OFFLOAD_##_name, #_name }

static const struct {
        uint64_t offload;
        const char *name;
} rte_tx_offload_names[] = {
        RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
        RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
        RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
        RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
        RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
        RTE_TX_OFFLOAD_BIT2STR(SECURITY),
        RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
};

#undef RTE_TX_OFFLOAD_BIT2STR

/**
 * The user application callback description.
 *
 * It contains the callback address to be registered by the user
 * application, the pointer to the callback parameters, and the event type.
 */
struct rte_eth_dev_callback {
        TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
        rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
        void *cb_arg;                           /**< Parameter for callback */
        void *ret_param;                        /**< Return parameter */
        enum rte_eth_event_type event;          /**< Interrupt event type */
        uint32_t active;                        /**< Callback is executing */
};

enum {
        STAT_QMAP_TX = 0,
        STAT_QMAP_RX
};

int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
        int ret;
        struct rte_devargs devargs = {.args = NULL};
        const char *bus_param_key;
        char *bus_str = NULL;
        char *cls_str = NULL;
        int str_size;

        memset(iter, 0, sizeof(*iter));

        /*
         * The devargs string may use various syntaxes:
         *   - 0000:08:00.0,representor=[1-3]
         *   - pci:0000:06:00.0,representor=[0,5]
         *   - class=eth,mac=00:11:22:33:44:55
         * A new syntax is in development (not yet supported):
         *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
         */

        /*
         * Handle pure class filter (i.e. without any bus-level argument),
         * from the future new syntax.
         * rte_devargs_parse() does not support the new syntax yet,
         * which is why this simple case is temporarily parsed here.
         */
#define iter_anybus_str "class=eth,"
        if (strncmp(devargs_str, iter_anybus_str,
                        strlen(iter_anybus_str)) == 0) {
                iter->cls_str = devargs_str + strlen(iter_anybus_str);
                goto end;
        }

        /* Split bus, device and parameters. */
        ret = rte_devargs_parse(&devargs, devargs_str);
        if (ret != 0)
                goto error;

        /*
         * Assume parameters of the old syntax can match only at ethdev level.
         * Extra parameters will be ignored, thanks to the "+" prefix.
         */
        str_size = strlen(devargs.args) + 2;
        cls_str = malloc(str_size);
        if (cls_str == NULL) {
                ret = -ENOMEM;
                goto error;
        }
        ret = snprintf(cls_str, str_size, "+%s", devargs.args);
        if (ret != str_size - 1) {
                ret = -EINVAL;
                goto error;
        }
        iter->cls_str = cls_str;
        free(devargs.args); /* allocated by rte_devargs_parse() */
        devargs.args = NULL;

        iter->bus = devargs.bus;
        if (iter->bus->dev_iterate == NULL) {
                ret = -ENOTSUP;
                goto error;
        }

        /* Convert bus args to the new syntax for use with the new API dev_iterate. */
        if (strcmp(iter->bus->name, "vdev") == 0) {
                bus_param_key = "name";
        } else if (strcmp(iter->bus->name, "pci") == 0) {
                bus_param_key = "addr";
        } else {
                ret = -ENOTSUP;
                goto error;
        }
        str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
        bus_str = malloc(str_size);
        if (bus_str == NULL) {
                ret = -ENOMEM;
                goto error;
        }
        ret = snprintf(bus_str, str_size, "%s=%s",
                        bus_param_key, devargs.name);
        if (ret != str_size - 1) {
                ret = -EINVAL;
                goto error;
        }
        iter->bus_str = bus_str;

end:
        iter->cls = rte_class_find_by_name("eth");
        return 0;

error:
        if (ret == -ENOTSUP)
                RTE_LOG(ERR, EAL, "Bus %s does not support iterating.\n",
                                iter->bus->name);
        free(devargs.args);
        free(bus_str);
        free(cls_str);
        return ret;
}

uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
        if (iter->cls == NULL) /* invalid ethdev iterator */
                return RTE_MAX_ETHPORTS;

        do { /* loop to try all matching rte_device */
                /* If not a pure ethdev filter and */
                if (iter->bus != NULL &&
                                /* not in the middle of rte_eth_dev iteration, */
                                iter->class_device == NULL) {
                        /* get the next rte_device to try. */
                        iter->device = iter->bus->dev_iterate(
                                        iter->device, iter->bus_str, iter);
                        if (iter->device == NULL)
                                break; /* no more rte_device candidates */
                }
                /* The device matches the bus part; check the ethdev part. */
                iter->class_device = iter->cls->dev_iterate(
                                iter->class_device, iter->cls_str, iter);
                if (iter->class_device != NULL)
                        return eth_dev_to_id(iter->class_device); /* match */
        } while (iter->bus != NULL); /* need to try the next rte_device */

        /* No more ethdev ports to iterate. */
        rte_eth_iterator_cleanup(iter);
        return RTE_MAX_ETHPORTS;
}

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
        if (iter->bus_str == NULL)
                return; /* nothing to free in pure class filter */
        free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
        free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
        memset(iter, 0, sizeof(*iter));
}

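/*
 * Usage sketch (illustrative, not part of the upstream file): match every
 * ethdev port described by a devargs string, then release the iterator.
 * The MAC below is a placeholder.
 *
 *     struct rte_dev_iterator iter;
 *     uint16_t port_id;
 *
 *     if (rte_eth_iterator_init(&iter, "class=eth,mac=00:11:22:33:44:55") == 0) {
 *             while ((port_id = rte_eth_iterator_next(&iter)) != RTE_MAX_ETHPORTS)
 *                     printf("matched port %u\n", port_id);
 *             rte_eth_iterator_cleanup(&iter);
 *     }
 *
 * rte_eth_iterator_next() already calls rte_eth_iterator_cleanup() once
 * iteration is exhausted; the explicit call is only needed when the loop
 * is abandoned early, and it is harmless to repeat.
 */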
uint16_t
rte_eth_find_next(uint16_t port_id)
{
        while (port_id < RTE_MAX_ETHPORTS &&
                        rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
                port_id++;

        if (port_id >= RTE_MAX_ETHPORTS)
                return RTE_MAX_ETHPORTS;

        return port_id;
}

/*
 * Macro to iterate over all valid ports for internal usage.
 * Note: RTE_ETH_FOREACH_DEV differs in that it also filters out owned ports.
 */
#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
        for (port_id = rte_eth_find_next(0); \
             port_id < RTE_MAX_ETHPORTS; \
             port_id = rte_eth_find_next(port_id + 1))

uint16_t
rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
{
        port_id = rte_eth_find_next(port_id);
        while (port_id < RTE_MAX_ETHPORTS &&
                        rte_eth_devices[port_id].device != parent)
                port_id = rte_eth_find_next(port_id + 1);

        return port_id;
}

uint16_t
rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
        return rte_eth_find_next_of(port_id,
                        rte_eth_devices[ref_port_id].device);
}

static void
rte_eth_dev_shared_data_prepare(void)
{
        const unsigned flags = 0;
        const struct rte_memzone *mz;

        rte_spinlock_lock(&rte_eth_shared_data_lock);

        if (rte_eth_dev_shared_data == NULL) {
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        /* Allocate port data and ownership shared memory. */
                        mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
                                        sizeof(*rte_eth_dev_shared_data),
                                        rte_socket_id(), flags);
                } else
                        mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
                if (mz == NULL)
                        rte_panic("Cannot allocate ethdev shared data\n");

                rte_eth_dev_shared_data = mz->addr;
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        rte_eth_dev_shared_data->next_owner_id =
                                        RTE_ETH_DEV_NO_OWNER + 1;
                        rte_spinlock_init(&rte_eth_dev_shared_data->ownership_lock);
                        memset(rte_eth_dev_shared_data->data, 0,
                               sizeof(rte_eth_dev_shared_data->data));
                }
        }

        rte_spinlock_unlock(&rte_eth_shared_data_lock);
}

static bool
is_allocated(const struct rte_eth_dev *ethdev)
{
        return ethdev->data->name[0] != '\0';
}

static struct rte_eth_dev *
_rte_eth_dev_allocated(const char *name)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (rte_eth_devices[i].data != NULL &&
                    strcmp(rte_eth_devices[i].data->name, name) == 0)
                        return &rte_eth_devices[i];
        }
        return NULL;
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
        struct rte_eth_dev *ethdev;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        ethdev = _rte_eth_dev_allocated(name);

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

        return ethdev;
}

static uint16_t
rte_eth_dev_find_free_port(void)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                /* Using the shared name field to find a free port. */
                if (rte_eth_dev_shared_data->data[i].name[0] == '\0') {
                        RTE_ASSERT(rte_eth_devices[i].state ==
                                   RTE_ETH_DEV_UNUSED);
                        return i;
                }
        }
        return RTE_MAX_ETHPORTS;
}

static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
{
        struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

        eth_dev->data = &rte_eth_dev_shared_data->data[port_id];

        return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
        uint16_t port_id;
        struct rte_eth_dev *eth_dev = NULL;
        size_t name_len;

        name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
        if (name_len == 0) {
                RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n");
                return NULL;
        }

        if (name_len >= RTE_ETH_NAME_MAX_LEN) {
                RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n");
                return NULL;
        }

        rte_eth_dev_shared_data_prepare();

        /* Synchronize port creation between primary and secondary threads. */
        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        if (_rte_eth_dev_allocated(name) != NULL) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethernet device with name %s already allocated\n",
                        name);
                goto unlock;
        }

        port_id = rte_eth_dev_find_free_port();
        if (port_id == RTE_MAX_ETHPORTS) {
                RTE_ETHDEV_LOG(ERR,
                        "Reached maximum number of Ethernet ports\n");
                goto unlock;
        }

        eth_dev = eth_dev_get(port_id);
        strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
        eth_dev->data->port_id = port_id;
        eth_dev->data->mtu = RTE_ETHER_MTU;

unlock:
        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

        return eth_dev;
}

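/*
 * Illustrative PMD probe sketch (not part of the upstream file): a driver
 * typically allocates a port, fills in its private data and ops, then
 * announces it. "my_eth_ops" and "struct example_priv" are placeholders.
 *
 *     struct rte_eth_dev *eth_dev = rte_eth_dev_allocate("net_example0");
 *
 *     if (eth_dev == NULL)
 *             return -ENOMEM;
 *     eth_dev->data->dev_private = rte_zmalloc("example_priv",
 *                     sizeof(struct example_priv), RTE_CACHE_LINE_SIZE);
 *     eth_dev->dev_ops = &my_eth_ops;
 *     rte_eth_dev_probing_finish(eth_dev);
 */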
/*
 * Attach to a port already registered by the primary process, so that
 * the same device gets the same port id in both the primary and
 * secondary processes.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
        uint16_t i;
        struct rte_eth_dev *eth_dev = NULL;

        rte_eth_dev_shared_data_prepare();

        /* Synchronize port attachment to primary port creation and release. */
        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (strcmp(rte_eth_dev_shared_data->data[i].name, name) == 0)
                        break;
        }
        if (i == RTE_MAX_ETHPORTS) {
                RTE_ETHDEV_LOG(ERR,
                        "Device %s is not driven by the primary process\n",
                        name);
        } else {
                eth_dev = eth_dev_get(i);
                RTE_ASSERT(eth_dev->data->port_id == i);
        }

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return eth_dev;
}

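/*
 * Illustrative sketch (not part of the upstream file): in a secondary
 * process, a PMD probe path attaches instead of allocating, so that the
 * port id stays consistent across processes. "my_eth_ops" is a
 * placeholder.
 *
 *     if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
 *             struct rte_eth_dev *eth_dev =
 *                             rte_eth_dev_attach_secondary("net_example0");
 *
 *             if (eth_dev == NULL)
 *                     return -ENODEV;
 *             eth_dev->dev_ops = &my_eth_ops;
 *             rte_eth_dev_probing_finish(eth_dev);
 *     }
 */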
int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
        if (eth_dev == NULL)
                return -EINVAL;

        rte_eth_dev_shared_data_prepare();

        if (eth_dev->state != RTE_ETH_DEV_UNUSED)
                _rte_eth_dev_callback_process(eth_dev,
                                RTE_ETH_EVENT_DESTROY, NULL);

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        eth_dev->state = RTE_ETH_DEV_UNUSED;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                rte_free(eth_dev->data->rx_queues);
                rte_free(eth_dev->data->tx_queues);
                rte_free(eth_dev->data->mac_addrs);
                rte_free(eth_dev->data->hash_mac_addrs);
                rte_free(eth_dev->data->dev_private);
                memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
        }

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

        return 0;
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
        if (port_id >= RTE_MAX_ETHPORTS ||
            (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
                return 0;
        else
                return 1;
}

static int
rte_eth_is_valid_owner_id(uint64_t owner_id)
{
        if (owner_id == RTE_ETH_DEV_NO_OWNER ||
            rte_eth_dev_shared_data->next_owner_id <= owner_id)
                return 0;
        return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
        port_id = rte_eth_find_next(port_id);
        while (port_id < RTE_MAX_ETHPORTS &&
                        rte_eth_devices[port_id].data->owner.id != owner_id)
                port_id = rte_eth_find_next(port_id + 1);

        return port_id;
}

int
rte_eth_dev_owner_new(uint64_t *owner_id)
{
        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        *owner_id = rte_eth_dev_shared_data->next_owner_id++;

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return 0;
}

static int
_rte_eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
                       const struct rte_eth_dev_owner *new_owner)
{
        struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
        struct rte_eth_dev_owner *port_owner;

        if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
                RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
                        port_id);
                return -ENODEV;
        }

        if (!rte_eth_is_valid_owner_id(new_owner->id) &&
            !rte_eth_is_valid_owner_id(old_owner_id)) {
                RTE_ETHDEV_LOG(ERR,
                        "Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
                       old_owner_id, new_owner->id);
                return -EINVAL;
        }

        port_owner = &rte_eth_devices[port_id].data->owner;
        if (port_owner->id != old_owner_id) {
                RTE_ETHDEV_LOG(ERR,
                        "Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
                        port_id, port_owner->name, port_owner->id);
                return -EPERM;
        }

        /* cannot truncate (same structure size) */
        strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);

        port_owner->id = new_owner->id;

        RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
                port_id, new_owner->name, new_owner->id);

        return 0;
}

int
rte_eth_dev_owner_set(const uint16_t port_id,
                      const struct rte_eth_dev_owner *owner)
{
        int ret;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        ret = _rte_eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return ret;
}

int
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
        const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
                        {.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
        int ret;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        ret = _rte_eth_dev_owner_set(port_id, owner_id, &new_owner);

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return ret;
}

int
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
        uint16_t port_id;
        int ret = 0;

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        if (rte_eth_is_valid_owner_id(owner_id)) {
                for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
                        if (rte_eth_devices[port_id].data->owner.id == owner_id)
                                memset(&rte_eth_devices[port_id].data->owner, 0,
                                       sizeof(struct rte_eth_dev_owner));
                RTE_ETHDEV_LOG(NOTICE,
                        "Ownership has been removed from all ports owned by %016"PRIx64"\n",
                        owner_id);
        } else {
                RTE_ETHDEV_LOG(ERR,
                               "Invalid owner id=%016"PRIx64"\n",
                               owner_id);
                ret = -EINVAL;
        }

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);

        return ret;
}

int
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
        int ret = 0;
        struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];

        rte_eth_dev_shared_data_prepare();

        rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);

        if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
                RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
                        port_id);
                ret = -ENODEV;
        } else {
                rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
        }

        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
        return ret;
}

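/*
 * Illustrative sketch (not part of the upstream file): a component claims
 * exclusive control of port 0 (assumed valid here) and later walks the
 * ports it owns.
 *
 *     struct rte_eth_dev_owner owner = { .name = "my_app" };
 *     uint16_t port_id;
 *
 *     rte_eth_dev_owner_new(&owner.id);
 *     if (rte_eth_dev_owner_set(0, &owner) == 0) {
 *             RTE_ETH_FOREACH_DEV_OWNED_BY(port_id, owner.id)
 *                     printf("port %u owned by %s\n", port_id, owner.name);
 *             rte_eth_dev_owner_delete(owner.id);
 *     }
 */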
int
rte_eth_dev_socket_id(uint16_t port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
        return rte_eth_devices[port_id].data->numa_node;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
        return rte_eth_devices[port_id].security_ctx;
}

uint16_t
rte_eth_dev_count_avail(void)
{
        uint16_t p;
        uint16_t count;

        count = 0;

        RTE_ETH_FOREACH_DEV(p)
                count++;

        return count;
}

uint16_t
rte_eth_dev_count_total(void)
{
        uint16_t port, count = 0;

        RTE_ETH_FOREACH_VALID_DEV(port)
                count++;

        return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
        char *tmp;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        if (name == NULL) {
                RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
                return -EINVAL;
        }

        /* Don't check 'rte_eth_devices[i].data' here,
         * because it might be overwritten by a VDEV PMD. */
        tmp = rte_eth_dev_shared_data->data[port_id].name;
        strcpy(name, tmp);
        return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
        uint32_t pid;

        if (name == NULL) {
                RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
                return -EINVAL;
        }

        RTE_ETH_FOREACH_VALID_DEV(pid)
                if (!strcmp(name, rte_eth_dev_shared_data->data[pid].name)) {
                        *port_id = pid;
                        return 0;
                }

        return -ENODEV;
}

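/*
 * Illustrative sketch (not part of the upstream file): round-trip a port
 * name through the two lookups above; RTE_ETH_NAME_MAX_LEN bounds the
 * buffer.
 *
 *     char name[RTE_ETH_NAME_MAX_LEN];
 *     uint16_t port_id;
 *
 *     if (rte_eth_dev_get_name_by_port(0, name) == 0 &&
 *                     rte_eth_dev_get_port_by_name(name, &port_id) == 0)
 *             RTE_ASSERT(port_id == 0);
 */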
static int
eth_err(uint16_t port_id, int ret)
{
        if (ret == 0)
                return 0;
        if (rte_eth_dev_is_removed(port_id))
                return -EIO;
        return ret;
}

static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_rx_queues;
        void **rxq;
        unsigned i;

        if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
                                sizeof(dev->data->rx_queues[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (dev->data->rx_queues == NULL) {
                        dev->data->nb_rx_queues = 0;
                        return -(ENOMEM);
                }
        } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);
                rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (rxq == NULL)
                        return -(ENOMEM);
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(rxq + old_nb_queues, 0,
                                sizeof(rxq[0]) * new_qs);
                }

                dev->data->rx_queues = rxq;

        } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);

                rte_free(dev->data->rx_queues);
                dev->data->rx_queues = NULL;
        }
        dev->data->nb_rx_queues = nb_queues;
        return 0;
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (!dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be started before starting any queue\n",
                        port_id);
                return -EINVAL;
        }

        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

        if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        rx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->rx_queue_start(dev,
                                                             rx_queue_id));
}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

        if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        rx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
}

int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (!dev->data->dev_started) {
                RTE_ETHDEV_LOG(ERR,
                        "Port %u must be started before starting any queue\n",
                        port_id);
                return -EINVAL;
        }

        if (tx_queue_id >= dev->data->nb_tx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

        if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        tx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (tx_queue_id >= dev->data->nb_tx_queues) {
                RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

        if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
                RTE_ETHDEV_LOG(INFO,
                        "Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
                        tx_queue_id, port_id);
                return -EINVAL;
        }

        if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
}

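/*
 * Illustrative sketch (not part of the upstream file): per-queue start is
 * mostly useful with deferred-start queues, which rte_eth_dev_start()
 * leaves stopped so the application can start them on demand. "port_id",
 * "dev_info" and "mb_pool" are assumed to be set up already.
 *
 *     struct rte_eth_rxconf rxconf = dev_info.default_rxconf;
 *
 *     rxconf.rx_deferred_start = 1;
 *     rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *                     &rxconf, mb_pool);
 *     rte_eth_dev_start(port_id);
 *     rte_eth_dev_rx_queue_start(port_id, 0);
 */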
static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_tx_queues;
        void **txq;
        unsigned i;

        if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
                                                   sizeof(dev->data->tx_queues[0]) * nb_queues,
                                                   RTE_CACHE_LINE_SIZE);
                if (dev->data->tx_queues == NULL) {
                        dev->data->nb_tx_queues = 0;
                        return -(ENOMEM);
                }
        } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);
                txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
                                  RTE_CACHE_LINE_SIZE);
                if (txq == NULL)
                        return -ENOMEM;
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(txq + old_nb_queues, 0,
                               sizeof(txq[0]) * new_qs);
                }

                dev->data->tx_queues = txq;

        } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);

                rte_free(dev->data->tx_queues);
                dev->data->tx_queues = NULL;
        }
        dev->data->nb_tx_queues = nb_queues;
        return 0;
}

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
        switch (speed) {
        case ETH_SPEED_NUM_10M:
                return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
        case ETH_SPEED_NUM_100M:
                return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
        case ETH_SPEED_NUM_1G:
                return ETH_LINK_SPEED_1G;
        case ETH_SPEED_NUM_2_5G:
                return ETH_LINK_SPEED_2_5G;
        case ETH_SPEED_NUM_5G:
                return ETH_LINK_SPEED_5G;
        case ETH_SPEED_NUM_10G:
                return ETH_LINK_SPEED_10G;
        case ETH_SPEED_NUM_20G:
                return ETH_LINK_SPEED_20G;
        case ETH_SPEED_NUM_25G:
                return ETH_LINK_SPEED_25G;
        case ETH_SPEED_NUM_40G:
                return ETH_LINK_SPEED_40G;
        case ETH_SPEED_NUM_50G:
                return ETH_LINK_SPEED_50G;
        case ETH_SPEED_NUM_56G:
                return ETH_LINK_SPEED_56G;
        case ETH_SPEED_NUM_100G:
                return ETH_LINK_SPEED_100G;
        default:
                return 0;
        }
}

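/*
 * Illustrative sketch (not part of the upstream file): building a fixed
 * link-speed configuration from a numeric speed, e.g. forcing 10G full
 * duplex in the port configuration:
 *
 *     struct rte_eth_conf conf = {0};
 *
 *     conf.link_speeds = ETH_LINK_SPEED_FIXED |
 *                     rte_eth_speed_bitflag(ETH_SPEED_NUM_10G,
 *                                           ETH_LINK_FULL_DUPLEX);
 */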
const char *
rte_eth_dev_rx_offload_name(uint64_t offload)
{
        const char *name = "UNKNOWN";
        unsigned int i;

        for (i = 0; i < RTE_DIM(rte_rx_offload_names); ++i) {
                if (offload == rte_rx_offload_names[i].offload) {
                        name = rte_rx_offload_names[i].name;
                        break;
                }
        }

        return name;
}

const char *
rte_eth_dev_tx_offload_name(uint64_t offload)
{
        const char *name = "UNKNOWN";
        unsigned int i;

        for (i = 0; i < RTE_DIM(rte_tx_offload_names); ++i) {
                if (offload == rte_tx_offload_names[i].offload) {
                        name = rte_tx_offload_names[i].name;
                        break;
                }
        }

        return name;
}

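/*
 * Illustrative sketch (not part of the upstream file): decoding an
 * offload bitmask one bit at a time with the lookups above, using the
 * same count-trailing-zeros technique validate_offloads() uses below.
 *
 *     uint64_t offloads = dev_info.rx_offload_capa;
 *
 *     while (offloads != 0) {
 *             uint64_t bit = 1ULL << __builtin_ctzll(offloads);
 *
 *             printf("%s\n", rte_eth_dev_rx_offload_name(bit));
 *             offloads &= ~bit;
 *     }
 */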
static inline int
check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
                   uint32_t max_rx_pkt_len, uint32_t dev_info_size)
{
        int ret = 0;

        if (dev_info_size == 0) {
                if (config_size != max_rx_pkt_len) {
                        RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
                                       " %u != %u is not allowed\n",
                                       port_id, config_size, max_rx_pkt_len);
                        ret = -EINVAL;
                }
        } else if (config_size > dev_info_size) {
                RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
                               "> max allowed value %u\n", port_id, config_size,
                               dev_info_size);
                ret = -EINVAL;
        } else if (config_size < RTE_ETHER_MIN_LEN) {
                RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
                               "< min allowed value %u\n", port_id, config_size,
                               (unsigned int)RTE_ETHER_MIN_LEN);
                ret = -EINVAL;
        }
        return ret;
}

/*
 * Validate offloads that are requested through rte_eth_dev_configure against
 * the offloads successfully set by the Ethernet device.
 *
 * @param port_id
 *   The port identifier of the Ethernet device.
 * @param req_offloads
 *   The offloads that have been requested through `rte_eth_dev_configure`.
 * @param set_offloads
 *   The offloads successfully set by the Ethernet device.
 * @param offload_type
 *   The offload type, i.e. the Rx/Tx string.
 * @param offload_name
 *   The function that prints the offload name.
 * @return
 *   - (0) if validation successful.
 *   - (-EINVAL) if a requested offload has been silently disabled.
 */
static int
validate_offloads(uint16_t port_id, uint64_t req_offloads,
                  uint64_t set_offloads, const char *offload_type,
                  const char *(*offload_name)(uint64_t))
{
        uint64_t offloads_diff = req_offloads ^ set_offloads;
        uint64_t offload;
        int ret = 0;

        while (offloads_diff != 0) {
                /* Check if any offload is requested but not enabled. */
                offload = 1ULL << __builtin_ctzll(offloads_diff);
                if (offload & req_offloads) {
                        RTE_ETHDEV_LOG(ERR,
                                "Port %u failed to enable %s offload %s\n",
                                port_id, offload_type, offload_name(offload));
                        ret = -EINVAL;
                }

                /* Check if an offload couldn't be disabled. */
                if (offload & set_offloads) {
                        RTE_ETHDEV_LOG(DEBUG,
                                "Port %u %s offload %s is not requested but enabled\n",
                                port_id, offload_type, offload_name(offload));
                }

                offloads_diff &= ~offload;
        }

        return ret;
}

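/*
 * Worked example (illustrative): with req_offloads = 0x5 and
 * set_offloads = 0x6, offloads_diff = 0x3. Bit 0 (0x1) was requested but
 * not set, so an error is logged and -EINVAL returned; bit 1 (0x2) was
 * set without being requested, which is only reported at DEBUG level.
 */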
1220 int
1221 rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
1222                       const struct rte_eth_conf *dev_conf)
1223 {
1224         struct rte_eth_dev *dev;
1225         struct rte_eth_dev_info dev_info;
1226         struct rte_eth_conf orig_conf;
1227         int diag;
1228         int ret;
1229
1230         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1231
1232         dev = &rte_eth_devices[port_id];
1233
1234         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
1235
1236         if (dev->data->dev_started) {
1237                 RTE_ETHDEV_LOG(ERR,
1238                         "Port %u must be stopped to allow configuration\n",
1239                         port_id);
1240                 return -EBUSY;
1241         }
1242
1243          /* Store original config, as rollback required on failure */
1244         memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));
1245
1246         /*
1247          * Copy the dev_conf parameter into the dev structure.
1248          * rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get
1249          */
1250         if (dev_conf != &dev->data->dev_conf)
1251                 memcpy(&dev->data->dev_conf, dev_conf,
1252                        sizeof(dev->data->dev_conf));
1253
1254         ret = rte_eth_dev_info_get(port_id, &dev_info);
1255         if (ret != 0)
1256                 goto rollback;
1257
1258         /* If number of queues specified by application for both Rx and Tx is
1259          * zero, use driver preferred values. This cannot be done individually
1260          * as it is valid for either Tx or Rx (but not both) to be zero.
1261          * If driver does not provide any preferred valued, fall back on
1262          * EAL defaults.
1263          */
1264         if (nb_rx_q == 0 && nb_tx_q == 0) {
1265                 nb_rx_q = dev_info.default_rxportconf.nb_queues;
1266                 if (nb_rx_q == 0)
1267                         nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
1268                 nb_tx_q = dev_info.default_txportconf.nb_queues;
1269                 if (nb_tx_q == 0)
1270                         nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
1271         }
1272
1273         if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
1274                 RTE_ETHDEV_LOG(ERR,
1275                         "Number of RX queues requested (%u) is greater than max supported(%d)\n",
1276                         nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
1277                 ret = -EINVAL;
1278                 goto rollback;
1279         }
1280
1281         if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
1282                 RTE_ETHDEV_LOG(ERR,
1283                         "Number of TX queues requested (%u) is greater than max supported(%d)\n",
1284                         nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
1285                 ret = -EINVAL;
1286                 goto rollback;
1287         }
1288
1289         /*
1290          * Check that the numbers of RX and TX queues are not greater
1291          * than the maximum number of RX and TX queues supported by the
1292          * configured device.
1293          */
1294         if (nb_rx_q > dev_info.max_rx_queues) {
1295                 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
1296                         port_id, nb_rx_q, dev_info.max_rx_queues);
1297                 ret = -EINVAL;
1298                 goto rollback;
1299         }
1300
1301         if (nb_tx_q > dev_info.max_tx_queues) {
1302                 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
1303                         port_id, nb_tx_q, dev_info.max_tx_queues);
1304                 ret = -EINVAL;
1305                 goto rollback;
1306         }
1307
1308         /* Check that the device supports requested interrupts */
1309         if ((dev_conf->intr_conf.lsc == 1) &&
1310                         (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
1311                 RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
1312                         dev->device->driver->name);
1313                 ret = -EINVAL;
1314                 goto rollback;
1315         }
1316         if ((dev_conf->intr_conf.rmv == 1) &&
1317                         (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
1318                 RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
1319                         dev->device->driver->name);
1320                 ret = -EINVAL;
1321                 goto rollback;
1322         }
1323
1324         /*
1325          * If jumbo frames are enabled, check that the maximum RX packet
1326          * length is supported by the configured device.
1327          */
1328         if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
1329                 if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) {
1330                         RTE_ETHDEV_LOG(ERR,
1331                                 "Ethdev port_id=%u max_rx_pkt_len %u > max valid value %u\n",
1332                                 port_id, dev_conf->rxmode.max_rx_pkt_len,
1333                                 dev_info.max_rx_pktlen);
1334                         ret = -EINVAL;
1335                         goto rollback;
1336                 } else if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN) {
1337                         RTE_ETHDEV_LOG(ERR,
1338                                 "Ethdev port_id=%u max_rx_pkt_len %u < min valid value %u\n",
1339                                 port_id, dev_conf->rxmode.max_rx_pkt_len,
1340                                 (unsigned int)RTE_ETHER_MIN_LEN);
1341                         ret = -EINVAL;
1342                         goto rollback;
1343                 }
1344         } else {
1345                 if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN ||
1346                         dev_conf->rxmode.max_rx_pkt_len > RTE_ETHER_MAX_LEN)
1347                         /* Use default value */
1348                         dev->data->dev_conf.rxmode.max_rx_pkt_len =
1349                                                         RTE_ETHER_MAX_LEN;
1350         }
1351
1352         /*
1353          * If LRO is enabled, check that the maximum aggregated packet
1354          * size is supported by the configured device.
1355          */
1356         if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
1357                 if (dev_conf->rxmode.max_lro_pkt_size == 0)
1358                         dev->data->dev_conf.rxmode.max_lro_pkt_size =
1359                                 dev->data->dev_conf.rxmode.max_rx_pkt_len;
1360                 ret = check_lro_pkt_size(port_id,
1361                                 dev->data->dev_conf.rxmode.max_lro_pkt_size,
1362                                 dev->data->dev_conf.rxmode.max_rx_pkt_len,
1363                                 dev_info.max_lro_pkt_size);
1364                 if (ret != 0)
1365                         goto rollback;
1366         }
1367
1368         /* Any requested offloading must be within its device capabilities */
1369         if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
1370              dev_conf->rxmode.offloads) {
1371                 RTE_ETHDEV_LOG(ERR,
1372                         "Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
1373                         "capabilities 0x%"PRIx64" in %s()\n",
1374                         port_id, dev_conf->rxmode.offloads,
1375                         dev_info.rx_offload_capa,
1376                         __func__);
1377                 ret = -EINVAL;
1378                 goto rollback;
1379         }
1380         if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
1381              dev_conf->txmode.offloads) {
1382                 RTE_ETHDEV_LOG(ERR,
1383                         "Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
1384                         "capabilities 0x%"PRIx64" in %s()\n",
1385                         port_id, dev_conf->txmode.offloads,
1386                         dev_info.tx_offload_capa,
1387                         __func__);
1388                 ret = -EINVAL;
1389                 goto rollback;
1390         }
1391
1392         dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
1393                 rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);
1394
1395         /* Check that device supports requested rss hash functions. */
1396         if ((dev_info.flow_type_rss_offloads |
1397              dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
1398             dev_info.flow_type_rss_offloads) {
1399                 RTE_ETHDEV_LOG(ERR,
1400                         "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
1401                         port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
1402                         dev_info.flow_type_rss_offloads);
1403                 ret = -EINVAL;
1404                 goto rollback;
1405         }
1406
1407         /* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
1408         if (((dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) == 0) &&
1409             (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
1410                 RTE_ETHDEV_LOG(ERR,
1411                         "Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
1412                         port_id,
1413                         rte_eth_dev_rx_offload_name(DEV_RX_OFFLOAD_RSS_HASH));
1414                 ret = -EINVAL;
1415                 goto rollback;
1416         }
1417
1418         /*
1419          * Setup new number of RX/TX queues and reconfigure device.
1420          */
1421         diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
1422         if (diag != 0) {
1423                 RTE_ETHDEV_LOG(ERR,
1424                         "Port%u rte_eth_dev_rx_queue_config = %d\n",
1425                         port_id, diag);
1426                 ret = diag;
1427                 goto rollback;
1428         }
1429
1430         diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
1431         if (diag != 0) {
1432                 RTE_ETHDEV_LOG(ERR,
1433                         "Port%u rte_eth_dev_tx_queue_config = %d\n",
1434                         port_id, diag);
1435                 rte_eth_dev_rx_queue_config(dev, 0);
1436                 ret = diag;
1437                 goto rollback;
1438         }
1439
1440         diag = (*dev->dev_ops->dev_configure)(dev);
1441         if (diag != 0) {
1442                 RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
1443                         port_id, diag);
1444                 ret = eth_err(port_id, diag);
1445                 goto reset_queues;
1446         }
1447
1448         /* Initialize Rx profiling if enabled at compilation time. */
1449         diag = __rte_eth_dev_profile_init(port_id, dev);
1450         if (diag != 0) {
1451                 RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
1452                         port_id, diag);
1453                 ret = eth_err(port_id, diag);
1454                 goto reset_queues;
1455         }
1456
1457         /* Validate Rx offloads. */
1458         diag = validate_offloads(port_id,
1459                         dev_conf->rxmode.offloads,
1460                         dev->data->dev_conf.rxmode.offloads, "Rx",
1461                         rte_eth_dev_rx_offload_name);
1462         if (diag != 0) {
1463                 ret = diag;
1464                 goto reset_queues;
1465         }
1466
1467         /* Validate Tx offloads. */
1468         diag = validate_offloads(port_id,
1469                         dev_conf->txmode.offloads,
1470                         dev->data->dev_conf.txmode.offloads, "Tx",
1471                         rte_eth_dev_tx_offload_name);
1472         if (diag != 0) {
1473                 ret = diag;
1474                 goto reset_queues;
1475         }
1476
1477         rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0);
1478         return 0;
1479 reset_queues:
1480         rte_eth_dev_rx_queue_config(dev, 0);
1481         rte_eth_dev_tx_queue_config(dev, 0);
1482 rollback:
1483         memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
1484
1485         rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret);
1486         return ret;
1487 }
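
/*
 * Usage sketch for the configure/queue-setup/start sequence whose
 * validation ends above. Port 0, the descriptor counts and the
 * "mbuf_pool" mempool are illustrative assumptions, not values taken
 * from this file:
 *
 *	struct rte_eth_conf conf = {
 *		.rxmode = { .mq_mode = ETH_MQ_RX_NONE },
 *	};
 *
 *	if (rte_eth_dev_configure(0, 1, 1, &conf) != 0)
 *		rte_exit(EXIT_FAILURE, "cannot configure port\n");
 *	if (rte_eth_rx_queue_setup(0, 0, 1024, rte_socket_id(),
 *				   NULL, mbuf_pool) != 0)
 *		rte_exit(EXIT_FAILURE, "cannot setup Rx queue\n");
 *	if (rte_eth_tx_queue_setup(0, 0, 1024, rte_socket_id(), NULL) != 0)
 *		rte_exit(EXIT_FAILURE, "cannot setup Tx queue\n");
 *	if (rte_eth_dev_start(0) != 0)
 *		rte_exit(EXIT_FAILURE, "cannot start port\n");
 */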
1488
1489 void
1490 _rte_eth_dev_reset(struct rte_eth_dev *dev)
1491 {
1492         if (dev->data->dev_started) {
1493                 RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
1494                         dev->data->port_id);
1495                 return;
1496         }
1497
1498         rte_eth_dev_rx_queue_config(dev, 0);
1499         rte_eth_dev_tx_queue_config(dev, 0);
1500
1501         memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
1502 }
1503
1504 static void
1505 rte_eth_dev_mac_restore(struct rte_eth_dev *dev,
1506                         struct rte_eth_dev_info *dev_info)
1507 {
1508         struct rte_ether_addr *addr;
1509         uint16_t i;
1510         uint32_t pool = 0;
1511         uint64_t pool_mask;
1512
1513         /* replay MAC address configuration including default MAC */
1514         addr = &dev->data->mac_addrs[0];
1515         if (*dev->dev_ops->mac_addr_set != NULL)
1516                 (*dev->dev_ops->mac_addr_set)(dev, addr);
1517         else if (*dev->dev_ops->mac_addr_add != NULL)
1518                 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);
1519
1520         if (*dev->dev_ops->mac_addr_add != NULL) {
1521                 for (i = 1; i < dev_info->max_mac_addrs; i++) {
1522                         addr = &dev->data->mac_addrs[i];
1523
1524                         /* skip zero address */
1525                         if (rte_is_zero_ether_addr(addr))
1526                                 continue;
1527
1528                         pool = 0;
1529                         pool_mask = dev->data->mac_pool_sel[i];
1530
1531                         do {
1532                                 if (pool_mask & 1ULL)
1533                                         (*dev->dev_ops->mac_addr_add)(dev,
1534                                                 addr, i, pool);
1535                                 pool_mask >>= 1;
1536                                 pool++;
1537                         } while (pool_mask);
1538                 }
1539         }
1540 }
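
/*
 * Note: dev->data->mac_pool_sel[i] is a bitmask of the VMDq pools that
 * MAC address i belongs to; the do/while loop above replays one
 * mac_addr_add() call per set bit, e.g. a mask of 0x5 re-adds the
 * address to pools 0 and 2.
 */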
1541
1542 static int
1543 rte_eth_dev_config_restore(struct rte_eth_dev *dev,
1544                            struct rte_eth_dev_info *dev_info, uint16_t port_id)
1545 {
1546         int ret;
1547
1548         if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
1549                 rte_eth_dev_mac_restore(dev, dev_info);
1550
1551         /* replay promiscuous configuration */
1552         /*
1553          * Use the driver callbacks directly: port_id is already checked
1554          * and the "value already set" short-circuit must be bypassed.
1555          */
1556         if (rte_eth_promiscuous_get(port_id) == 1 &&
1557             *dev->dev_ops->promiscuous_enable != NULL) {
1558                 ret = eth_err(port_id,
1559                               (*dev->dev_ops->promiscuous_enable)(dev));
1560                 if (ret != 0 && ret != -ENOTSUP) {
1561                         RTE_ETHDEV_LOG(ERR,
1562                                 "Failed to enable promiscuous mode for device (port %u): %s\n",
1563                                 port_id, rte_strerror(-ret));
1564                         return ret;
1565                 }
1566         } else if (rte_eth_promiscuous_get(port_id) == 0 &&
1567                    *dev->dev_ops->promiscuous_disable != NULL) {
1568                 ret = eth_err(port_id,
1569                               (*dev->dev_ops->promiscuous_disable)(dev));
1570                 if (ret != 0 && ret != -ENOTSUP) {
1571                         RTE_ETHDEV_LOG(ERR,
1572                                 "Failed to disable promiscuous mode for device (port %u): %s\n",
1573                                 port_id, rte_strerror(-ret));
1574                         return ret;
1575                 }
1576         }
1577
1578         /* replay all multicast configuration */
1579         /*
1580          * Use the driver callbacks directly: port_id is already checked
1581          * and the "value already set" short-circuit must be bypassed.
1582          */
1583         if (rte_eth_allmulticast_get(port_id) == 1 &&
1584             *dev->dev_ops->allmulticast_enable != NULL) {
1585                 ret = eth_err(port_id,
1586                               (*dev->dev_ops->allmulticast_enable)(dev));
1587                 if (ret != 0 && ret != -ENOTSUP) {
1588                         RTE_ETHDEV_LOG(ERR,
1589                                 "Failed to enable allmulticast mode for device (port %u): %s\n",
1590                                 port_id, rte_strerror(-ret));
1591                         return ret;
1592                 }
1593         } else if (rte_eth_allmulticast_get(port_id) == 0 &&
1594                    *dev->dev_ops->allmulticast_disable != NULL) {
1595                 ret = eth_err(port_id,
1596                               (*dev->dev_ops->allmulticast_disable)(dev));
1597                 if (ret != 0 && ret != -ENOTSUP) {
1598                         RTE_ETHDEV_LOG(ERR,
1599                                 "Failed to disable allmulticast mode for device (port %u): %s\n",
1600                                 port_id, rte_strerror(-ret));
1601                         return ret;
1602                 }
1603         }
1604
1605         return 0;
1606 }
1607
1608 int
1609 rte_eth_dev_start(uint16_t port_id)
1610 {
1611         struct rte_eth_dev *dev;
1612         struct rte_eth_dev_info dev_info;
1613         int diag;
1614         int ret;
1615
1616         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1617
1618         dev = &rte_eth_devices[port_id];
1619
1620         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1621
1622         if (dev->data->dev_started != 0) {
1623                 RTE_ETHDEV_LOG(INFO,
1624                         "Device with port_id=%"PRIu16" already started\n",
1625                         port_id);
1626                 return 0;
1627         }
1628
1629         ret = rte_eth_dev_info_get(port_id, &dev_info);
1630         if (ret != 0)
1631                 return ret;
1632
1633         /* Restore MAC now if the device does not support changing it live */
1634         if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
1635                 rte_eth_dev_mac_restore(dev, &dev_info);
1636
1637         diag = (*dev->dev_ops->dev_start)(dev);
1638         if (diag == 0)
1639                 dev->data->dev_started = 1;
1640         else
1641                 return eth_err(port_id, diag);
1642
1643         ret = rte_eth_dev_config_restore(dev, &dev_info, port_id);
1644         if (ret != 0) {
1645                 RTE_ETHDEV_LOG(ERR,
1646                         "Error during restoring configuration for device (port %u): %s\n",
1647                         port_id, rte_strerror(-ret));
1648                 rte_eth_dev_stop(port_id);
1649                 return ret;
1650         }
1651
1652         if (dev->data->dev_conf.intr_conf.lsc == 0) {
1653                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
1654                 (*dev->dev_ops->link_update)(dev, 0);
1655         }
1656
1657         rte_ethdev_trace_start(port_id);
1658         return 0;
1659 }
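
/*
 * Illustrative start-and-wait-for-link sketch; the retry budget and the
 * use of rte_delay_ms() are assumptions, not requirements of this API:
 *
 *	struct rte_eth_link link;
 *	int n;
 *
 *	if (rte_eth_dev_start(port_id) != 0)
 *		return -1;
 *	for (n = 0; n < 90; n++) {	// poll for up to ~9 seconds
 *		rte_eth_link_get_nowait(port_id, &link);
 *		if (link.link_status == ETH_LINK_UP)
 *			break;
 *		rte_delay_ms(100);
 *	}
 */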
1660
1661 void
1662 rte_eth_dev_stop(uint16_t port_id)
1663 {
1664         struct rte_eth_dev *dev;
1665
1666         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1667         dev = &rte_eth_devices[port_id];
1668
1669         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1670
1671         if (dev->data->dev_started == 0) {
1672                 RTE_ETHDEV_LOG(INFO,
1673                         "Device with port_id=%"PRIu16" already stopped\n",
1674                         port_id);
1675                 return;
1676         }
1677
1678         dev->data->dev_started = 0;
1679         (*dev->dev_ops->dev_stop)(dev);
1680         rte_ethdev_trace_stop(port_id);
1681 }
1682
1683 int
1684 rte_eth_dev_set_link_up(uint16_t port_id)
1685 {
1686         struct rte_eth_dev *dev;
1687
1688         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1689
1690         dev = &rte_eth_devices[port_id];
1691
1692         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1693         return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
1694 }
1695
1696 int
1697 rte_eth_dev_set_link_down(uint16_t port_id)
1698 {
1699         struct rte_eth_dev *dev;
1700
1701         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1702
1703         dev = &rte_eth_devices[port_id];
1704
1705         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1706         return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
1707 }
1708
1709 void
1710 rte_eth_dev_close(uint16_t port_id)
1711 {
1712         struct rte_eth_dev *dev;
1713
1714         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1715         dev = &rte_eth_devices[port_id];
1716
1717         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
1718         dev->data->dev_started = 0;
1719         (*dev->dev_ops->dev_close)(dev);
1720
1721         rte_ethdev_trace_close(port_id);
1722         /* check behaviour flag - temporary for PMD migration */
1723         if ((dev->data->dev_flags & RTE_ETH_DEV_CLOSE_REMOVE) != 0) {
1724                 /* new behaviour: send event + reset state + free all data */
1725                 rte_eth_dev_release_port(dev);
1726                 return;
1727         }
1728         RTE_ETHDEV_LOG(DEBUG, "Port closing is using an old behaviour.\n"
1729                         "The driver %s should migrate to the new behaviour.\n",
1730                         dev->device->driver->name);
1731         /* old behaviour: only free queue arrays */
1732         dev->data->nb_rx_queues = 0;
1733         rte_free(dev->data->rx_queues);
1734         dev->data->rx_queues = NULL;
1735         dev->data->nb_tx_queues = 0;
1736         rte_free(dev->data->tx_queues);
1737         dev->data->tx_queues = NULL;
1738 }
1739
1740 int
1741 rte_eth_dev_reset(uint16_t port_id)
1742 {
1743         struct rte_eth_dev *dev;
1744         int ret;
1745
1746         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1747         dev = &rte_eth_devices[port_id];
1748
1749         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
1750
1751         rte_eth_dev_stop(port_id);
1752         ret = dev->dev_ops->dev_reset(dev);
1753
1754         return eth_err(port_id, ret);
1755 }
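
/*
 * rte_eth_dev_reset() only stops the port and asks the driver to reset
 * it; the application is expected to reconfigure afterwards. A hedged
 * sketch of the usual recovery path (queue re-setup elided):
 *
 *	if (rte_eth_dev_reset(port_id) == 0) {
 *		rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *		// ... re-setup Rx/Tx queues here ...
 *		rte_eth_dev_start(port_id);
 *	}
 */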
1756
1757 int
1758 rte_eth_dev_is_removed(uint16_t port_id)
1759 {
1760         struct rte_eth_dev *dev;
1761         int ret;
1762
1763         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
1764
1765         dev = &rte_eth_devices[port_id];
1766
1767         if (dev->state == RTE_ETH_DEV_REMOVED)
1768                 return 1;
1769
1770         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);
1771
1772         ret = dev->dev_ops->is_removed(dev);
1773         if (ret != 0)
1774                 /* Device is physically removed. */
1775                 dev->state = RTE_ETH_DEV_REMOVED;
1776
1777         return ret;
1778 }
1779
1780 int
1781 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1782                        uint16_t nb_rx_desc, unsigned int socket_id,
1783                        const struct rte_eth_rxconf *rx_conf,
1784                        struct rte_mempool *mp)
1785 {
1786         int ret;
1787         uint32_t mbp_buf_size;
1788         struct rte_eth_dev *dev;
1789         struct rte_eth_dev_info dev_info;
1790         struct rte_eth_rxconf local_conf;
1791         void **rxq;
1792
1793         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1794
1795         dev = &rte_eth_devices[port_id];
1796         if (rx_queue_id >= dev->data->nb_rx_queues) {
1797                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
1798                 return -EINVAL;
1799         }
1800
1801         if (mp == NULL) {
1802                 RTE_ETHDEV_LOG(ERR, "Invalid null mempool pointer\n");
1803                 return -EINVAL;
1804         }
1805
1806         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
1807
1808         /*
1809          * Check the size of the mbuf data buffer.
1810          * This value must be provided in the private data of the memory pool.
1811          * First check that the memory pool has valid private data.
1812          */
1813         ret = rte_eth_dev_info_get(port_id, &dev_info);
1814         if (ret != 0)
1815                 return ret;
1816
1817         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
1818                 RTE_ETHDEV_LOG(ERR, "%s private_data_size %d < %d\n",
1819                         mp->name, (int)mp->private_data_size,
1820                         (int)sizeof(struct rte_pktmbuf_pool_private));
1821                 return -ENOSPC;
1822         }
1823         mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1824
1825         if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
1826                 RTE_ETHDEV_LOG(ERR,
1827                         "%s mbuf_data_room_size %d < %d (RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)=%d)\n",
1828                         mp->name, (int)mbp_buf_size,
1829                         (int)(RTE_PKTMBUF_HEADROOM + dev_info.min_rx_bufsize),
1830                         (int)RTE_PKTMBUF_HEADROOM,
1831                         (int)dev_info.min_rx_bufsize);
1832                 return -EINVAL;
1833         }
1834
1835         /* Use default specified by driver, if nb_rx_desc is zero */
1836         if (nb_rx_desc == 0) {
1837                 nb_rx_desc = dev_info.default_rxportconf.ring_size;
1838                 /* If driver default is also zero, fall back on EAL default */
1839                 if (nb_rx_desc == 0)
1840                         nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
1841         }
1842
1843         if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
1844                         nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
1845                         nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
1846
1847                 RTE_ETHDEV_LOG(ERR,
1848                         "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
1849                         nb_rx_desc, dev_info.rx_desc_lim.nb_max,
1850                         dev_info.rx_desc_lim.nb_min,
1851                         dev_info.rx_desc_lim.nb_align);
1852                 return -EINVAL;
1853         }
1854
1855         if (dev->data->dev_started &&
1856                 !(dev_info.dev_capa &
1857                         RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
1858                 return -EBUSY;
1859
1860         if (dev->data->dev_started &&
1861                 (dev->data->rx_queue_state[rx_queue_id] !=
1862                         RTE_ETH_QUEUE_STATE_STOPPED))
1863                 return -EBUSY;
1864
1865         rxq = dev->data->rx_queues;
1866         if (rxq[rx_queue_id]) {
1867                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
1868                                         -ENOTSUP);
1869                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
1870                 rxq[rx_queue_id] = NULL;
1871         }
1872
1873         if (rx_conf == NULL)
1874                 rx_conf = &dev_info.default_rxconf;
1875
1876         local_conf = *rx_conf;
1877
1878         /*
1879          * If an offload has already been enabled in
1880          * rte_eth_dev_configure(), it is enabled on all queues,
1881          * so there is no need to enable it on this queue again.
1882          * The local_conf.offloads passed to the underlying PMD
1883          * therefore carries only the offloads enabled for this
1884          * queue alone, not those enabled port-wide.
1885          */
1886         local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
1887
1888         /*
1889          * Offloads newly added for this queue are those not enabled in
1890          * rte_eth_dev_configure(), and they must be of the per-queue
1891          * type. A pure per-port offload cannot be enabled on one queue
1892          * while disabled on another, so it cannot be newly added for a
1893          * single queue unless it was already enabled in
1894          * rte_eth_dev_configure().
1895          */
1896         if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
1897              local_conf.offloads) {
1898                 RTE_ETHDEV_LOG(ERR,
1899                         "Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
1900                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
1901                         port_id, rx_queue_id, local_conf.offloads,
1902                         dev_info.rx_queue_offload_capa,
1903                         __func__);
1904                 return -EINVAL;
1905         }
1906
1907         /*
1908          * If LRO is enabled, check that the maximum aggregated packet
1909          * size is supported by the configured device.
1910          */
1911         if (local_conf.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
1912                 if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0)
1913                         dev->data->dev_conf.rxmode.max_lro_pkt_size =
1914                                 dev->data->dev_conf.rxmode.max_rx_pkt_len;
1915                 int ret = check_lro_pkt_size(port_id,
1916                                 dev->data->dev_conf.rxmode.max_lro_pkt_size,
1917                                 dev->data->dev_conf.rxmode.max_rx_pkt_len,
1918                                 dev_info.max_lro_pkt_size);
1919                 if (ret != 0)
1920                         return ret;
1921         }
1922
1923         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
1924                                               socket_id, &local_conf, mp);
1925         if (!ret) {
1926                 if (!dev->data->min_rx_buf_size ||
1927                     dev->data->min_rx_buf_size > mbp_buf_size)
1928                         dev->data->min_rx_buf_size = mbp_buf_size;
1929         }
1930
1931         rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp,
1932                 rx_conf, ret);
1933         return eth_err(port_id, ret);
1934 }
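
/*
 * The mempool's data room must cover RTE_PKTMBUF_HEADROOM plus the
 * device's min_rx_bufsize, as checked above. A sizing sketch; the pool
 * name, mbuf count and cache size are assumptions:
 *
 *	struct rte_mempool *mp = rte_pktmbuf_pool_create("rx_pool",
 *		8192,				// number of mbufs
 *		256,				// per-lcore cache size
 *		0,				// application private area
 *		RTE_MBUF_DEFAULT_BUF_SIZE,	// data room incl. headroom
 *		rte_socket_id());
 *	if (mp == NULL)
 *		rte_exit(EXIT_FAILURE, "cannot create Rx mempool\n");
 */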
1935
1936 int
1937 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1938                                uint16_t nb_rx_desc,
1939                                const struct rte_eth_hairpin_conf *conf)
1940 {
1941         int ret;
1942         struct rte_eth_dev *dev;
1943         struct rte_eth_hairpin_cap cap;
1944         void **rxq;
1945         int i;
1946         int count;
1947
1948         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1949
1950         dev = &rte_eth_devices[port_id];
1951         if (rx_queue_id >= dev->data->nb_rx_queues) {
1952                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
1953                 return -EINVAL;
1954         }
1955         ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
1956         if (ret != 0)
1957                 return ret;
1958         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_hairpin_queue_setup,
1959                                 -ENOTSUP);
1960         /* if nb_rx_desc is zero use max number of desc from the driver. */
1961         if (nb_rx_desc == 0)
1962                 nb_rx_desc = cap.max_nb_desc;
1963         if (nb_rx_desc > cap.max_nb_desc) {
1964                 RTE_ETHDEV_LOG(ERR,
1965                         "Invalid value for nb_rx_desc(=%hu), should be: <= %hu\n",
1966                         nb_rx_desc, cap.max_nb_desc);
1967                 return -EINVAL;
1968         }
1969         if (conf->peer_count > cap.max_rx_2_tx) {
1970                 RTE_ETHDEV_LOG(ERR,
1971                         "Invalid value for number of peers for Rx queue(=%hu), should be: <= %hu\n",
1972                         conf->peer_count, cap.max_rx_2_tx);
1973                 return -EINVAL;
1974         }
1975         if (conf->peer_count == 0) {
1976                 RTE_ETHDEV_LOG(ERR,
1977                         "Invalid value for number of peers for Rx queue(=%hu), should be: > 0\n",
1978                         conf->peer_count);
1979                 return -EINVAL;
1980         }
1981         for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
1982              cap.max_nb_queues != UINT16_MAX; i++) {
1983                 if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
1984                         count++;
1985         }
1986         if (count > cap.max_nb_queues) {
1987                 RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues, max is %d\n",
1988                         cap.max_nb_queues);
1989                 return -EINVAL;
1990         }
1991         if (dev->data->dev_started)
1992                 return -EBUSY;
1993         rxq = dev->data->rx_queues;
1994         if (rxq[rx_queue_id] != NULL) {
1995                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
1996                                         -ENOTSUP);
1997                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
1998                 rxq[rx_queue_id] = NULL;
1999         }
2000         ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
2001                                                       nb_rx_desc, conf);
2002         if (ret == 0)
2003                 dev->data->rx_queue_state[rx_queue_id] =
2004                         RTE_ETH_QUEUE_STATE_HAIRPIN;
2005         return eth_err(port_id, ret);
2006 }
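
/*
 * Note: a hairpin Rx queue is wired in hardware to Tx hairpin queue(s),
 * so matched traffic is forwarded without touching the CPU; the peer Tx
 * side must be set up with rte_eth_tx_hairpin_queue_setup() before the
 * port is started.
 */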
2007
2008 int
2009 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2010                        uint16_t nb_tx_desc, unsigned int socket_id,
2011                        const struct rte_eth_txconf *tx_conf)
2012 {
2013         struct rte_eth_dev *dev;
2014         struct rte_eth_dev_info dev_info;
2015         struct rte_eth_txconf local_conf;
2016         void **txq;
2017         int ret;
2018
2019         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2020
2021         dev = &rte_eth_devices[port_id];
2022         if (tx_queue_id >= dev->data->nb_tx_queues) {
2023                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
2024                 return -EINVAL;
2025         }
2026
2027         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
2028
2029         ret = rte_eth_dev_info_get(port_id, &dev_info);
2030         if (ret != 0)
2031                 return ret;
2032
2033         /* Use default specified by driver, if nb_tx_desc is zero */
2034         if (nb_tx_desc == 0) {
2035                 nb_tx_desc = dev_info.default_txportconf.ring_size;
2036                 /* If driver default is zero, fall back on EAL default */
2037                 if (nb_tx_desc == 0)
2038                         nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
2039         }
2040         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
2041             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
2042             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
2043                 RTE_ETHDEV_LOG(ERR,
2044                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
2045                         nb_tx_desc, dev_info.tx_desc_lim.nb_max,
2046                         dev_info.tx_desc_lim.nb_min,
2047                         dev_info.tx_desc_lim.nb_align);
2048                 return -EINVAL;
2049         }
2050
2051         if (dev->data->dev_started &&
2052                 !(dev_info.dev_capa &
2053                         RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
2054                 return -EBUSY;
2055
2056         if (dev->data->dev_started &&
2057                 (dev->data->tx_queue_state[tx_queue_id] !=
2058                         RTE_ETH_QUEUE_STATE_STOPPED))
2059                 return -EBUSY;
2060
2061         txq = dev->data->tx_queues;
2062         if (txq[tx_queue_id]) {
2063                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
2064                                         -ENOTSUP);
2065                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
2066                 txq[tx_queue_id] = NULL;
2067         }
2068
2069         if (tx_conf == NULL)
2070                 tx_conf = &dev_info.default_txconf;
2071
2072         local_conf = *tx_conf;
2073
2074         /*
2075          * If an offload has already been enabled in
2076          * rte_eth_dev_configure(), it is enabled on all queues,
2077          * so there is no need to enable it on this queue again.
2078          * The local_conf.offloads passed to the underlying PMD
2079          * therefore carries only the offloads enabled for this
2080          * queue alone, not those enabled port-wide.
2081          */
2082         local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
2083
2084         /*
2085          * Offloads newly added for this queue are those not enabled in
2086          * rte_eth_dev_configure(), and they must be of the per-queue
2087          * type. A pure per-port offload cannot be enabled on one queue
2088          * while disabled on another, so it cannot be newly added for a
2089          * single queue unless it was already enabled in
2090          * rte_eth_dev_configure().
2091          */
2092         if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
2093              local_conf.offloads) {
2094                 RTE_ETHDEV_LOG(ERR,
2095                         "Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
2096                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2097                         port_id, tx_queue_id, local_conf.offloads,
2098                         dev_info.tx_queue_offload_capa,
2099                         __func__);
2100                 return -EINVAL;
2101         }
2102
2103         rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf);
2104         return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
2105                        tx_queue_id, nb_tx_desc, socket_id, &local_conf));
2106 }
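
/*
 * Sketch of enabling a queue-local offload on top of the port-wide
 * configuration; the offload chosen is an assumption and, per the check
 * above, must appear in dev_info.tx_queue_offload_capa:
 *
 *	struct rte_eth_dev_info info;
 *	struct rte_eth_txconf txconf;
 *
 *	if (rte_eth_dev_info_get(port_id, &info) != 0)
 *		return -1;
 *	txconf = info.default_txconf;
 *	txconf.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
 *	rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), &txconf);
 */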
2107
2108 int
2109 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2110                                uint16_t nb_tx_desc,
2111                                const struct rte_eth_hairpin_conf *conf)
2112 {
2113         struct rte_eth_dev *dev;
2114         struct rte_eth_hairpin_cap cap;
2115         void **txq;
2116         int i;
2117         int count;
2118         int ret;
2119
2120         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2121         dev = &rte_eth_devices[port_id];
2122         if (tx_queue_id >= dev->data->nb_tx_queues) {
2123                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
2124                 return -EINVAL;
2125         }
2126         ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2127         if (ret != 0)
2128                 return ret;
2129         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup,
2130                                 -ENOTSUP);
2131         /* if nb_tx_desc is zero use max number of desc from the driver. */
2132         if (nb_tx_desc == 0)
2133                 nb_tx_desc = cap.max_nb_desc;
2134         if (nb_tx_desc > cap.max_nb_desc) {
2135                 RTE_ETHDEV_LOG(ERR,
2136                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu\n",
2137                         nb_tx_desc, cap.max_nb_desc);
2138                 return -EINVAL;
2139         }
2140         if (conf->peer_count > cap.max_tx_2_rx) {
2141                 RTE_ETHDEV_LOG(ERR,
2142                         "Invalid value for number of peers for Tx queue(=%hu), should be: <= %hu\n",
2143                         conf->peer_count, cap.max_tx_2_rx);
2144                 return -EINVAL;
2145         }
2146         if (conf->peer_count == 0) {
2147                 RTE_ETHDEV_LOG(ERR,
2148                         "Invalid value for number of peers for Tx queue(=%hu), should be: > 0\n",
2149                         conf->peer_count);
2150                 return -EINVAL;
2151         }
2152         for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
2153              cap.max_nb_queues != UINT16_MAX; i++) {
2154                 if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
2155                         count++;
2156         }
2157         if (count > cap.max_nb_queues) {
2158                 RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues, max is %d\n",
2159                         cap.max_nb_queues);
2160                 return -EINVAL;
2161         }
2162         if (dev->data->dev_started)
2163                 return -EBUSY;
2164         txq = dev->data->tx_queues;
2165         if (txq[tx_queue_id] != NULL) {
2166                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
2167                                         -ENOTSUP);
2168                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
2169                 txq[tx_queue_id] = NULL;
2170         }
2171         ret = (*dev->dev_ops->tx_hairpin_queue_setup)
2172                 (dev, tx_queue_id, nb_tx_desc, conf);
2173         if (ret == 0)
2174                 dev->data->tx_queue_state[tx_queue_id] =
2175                         RTE_ETH_QUEUE_STATE_HAIRPIN;
2176         return eth_err(port_id, ret);
2177 }
2178
2179 void
2180 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
2181                 void *userdata __rte_unused)
2182 {
2183         unsigned i;
2184
2185         for (i = 0; i < unsent; i++)
2186                 rte_pktmbuf_free(pkts[i]);
2187 }
2188
2189 void
2190 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
2191                 void *userdata)
2192 {
2193         uint64_t *count = userdata;
2194         unsigned i;
2195
2196         for (i = 0; i < unsent; i++)
2197                 rte_pktmbuf_free(pkts[i]);
2198
2199         *count += unsent;
2200 }
2201
2202 int
2203 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
2204                 buffer_tx_error_fn cbfn, void *userdata)
2205 {
2206         buffer->error_callback = cbfn;
2207         buffer->error_userdata = userdata;
2208         return 0;
2209 }
2210
2211 int
2212 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
2213 {
2214         int ret = 0;
2215
2216         if (buffer == NULL)
2217                 return -EINVAL;
2218
2219         buffer->size = size;
2220         if (buffer->error_callback == NULL) {
2221                 ret = rte_eth_tx_buffer_set_err_callback(
2222                         buffer, rte_eth_tx_buffer_drop_callback, NULL);
2223         }
2224
2225         return ret;
2226 }
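
/*
 * Typical buffered-Tx usage; the buffer size and queue id below are
 * assumptions. The count callback makes drops visible to the caller:
 *
 *	static uint64_t drops;
 *	struct rte_eth_dev_tx_buffer *buf = rte_zmalloc_socket("tx_buf",
 *		RTE_ETH_TX_BUFFER_SIZE(32), 0, rte_socket_id());
 *
 *	rte_eth_tx_buffer_init(buf, 32);
 *	rte_eth_tx_buffer_set_err_callback(buf,
 *		rte_eth_tx_buffer_count_callback, &drops);
 *	// per packet:
 *	rte_eth_tx_buffer(port_id, 0, buf, mbuf);
 *	// periodically, to push out a partially filled buffer:
 *	rte_eth_tx_buffer_flush(port_id, 0, buf);
 */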
2227
2228 int
2229 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
2230 {
2231         struct rte_eth_dev *dev;
2232         int ret;
2233
2234         /* Validate input data. Bail if not valid or not supported. */
2235         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2236         dev = &rte_eth_devices[port_id];
2237         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
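        /*
         * Note: queue_id is not range-checked here; the caller must pass
         * a valid Tx queue index (queue_id < dev->data->nb_tx_queues).
         */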
2237
2238         /* Call driver to free pending mbufs. */
2239         ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
2240                                                free_cnt);
2241         return eth_err(port_id, ret);
2242 }
2243
2244 int
2245 rte_eth_promiscuous_enable(uint16_t port_id)
2246 {
2247         struct rte_eth_dev *dev;
2248         int diag = 0;
2249
2250         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2251         dev = &rte_eth_devices[port_id];
2252
2253         if (dev->data->promiscuous == 1)
2254                 return 0;
2255
2256         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP);
2257
2258         diag = (*dev->dev_ops->promiscuous_enable)(dev);
2259         dev->data->promiscuous = (diag == 0) ? 1 : 0;
2260
2261         return eth_err(port_id, diag);
2262 }
2263
2264 int
2265 rte_eth_promiscuous_disable(uint16_t port_id)
2266 {
2267         struct rte_eth_dev *dev;
2268         int diag = 0;
2269
2270         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2271         dev = &rte_eth_devices[port_id];
2272
2273         if (dev->data->promiscuous == 0)
2274                 return 0;
2275
2276         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP);
2277
2278         dev->data->promiscuous = 0;
2279         diag = (*dev->dev_ops->promiscuous_disable)(dev);
2280         if (diag != 0)
2281                 dev->data->promiscuous = 1;
2282
2283         return eth_err(port_id, diag);
2284 }
2285
2286 int
2287 rte_eth_promiscuous_get(uint16_t port_id)
2288 {
2289         struct rte_eth_dev *dev;
2290
2291         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2292
2293         dev = &rte_eth_devices[port_id];
2294         return dev->data->promiscuous;
2295 }
2296
2297 int
2298 rte_eth_allmulticast_enable(uint16_t port_id)
2299 {
2300         struct rte_eth_dev *dev;
2301         int diag;
2302
2303         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2304         dev = &rte_eth_devices[port_id];
2305
2306         if (dev->data->all_multicast == 1)
2307                 return 0;
2308
2309         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP);
2310         diag = (*dev->dev_ops->allmulticast_enable)(dev);
2311         dev->data->all_multicast = (diag == 0) ? 1 : 0;
2312
2313         return eth_err(port_id, diag);
2314 }
2315
2316 int
2317 rte_eth_allmulticast_disable(uint16_t port_id)
2318 {
2319         struct rte_eth_dev *dev;
2320         int diag;
2321
2322         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2323         dev = &rte_eth_devices[port_id];
2324
2325         if (dev->data->all_multicast == 0)
2326                 return 0;
2327
2328         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP);
2329         dev->data->all_multicast = 0;
2330         diag = (*dev->dev_ops->allmulticast_disable)(dev);
2331         if (diag != 0)
2332                 dev->data->all_multicast = 1;
2333
2334         return eth_err(port_id, diag);
2335 }
2336
2337 int
2338 rte_eth_allmulticast_get(uint16_t port_id)
2339 {
2340         struct rte_eth_dev *dev;
2341
2342         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2343
2344         dev = &rte_eth_devices[port_id];
2345         return dev->data->all_multicast;
2346 }
2347
2348 int
2349 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
2350 {
2351         struct rte_eth_dev *dev;
2352
2353         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2354         dev = &rte_eth_devices[port_id];
2355
2356         if (dev->data->dev_conf.intr_conf.lsc &&
2357             dev->data->dev_started)
2358                 rte_eth_linkstatus_get(dev, eth_link);
2359         else {
2360                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2361                 (*dev->dev_ops->link_update)(dev, 1);
2362                 *eth_link = dev->data->dev_link;
2363         }
2364
2365         return 0;
2366 }
2367
2368 int
2369 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
2370 {
2371         struct rte_eth_dev *dev;
2372
2373         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2374         dev = &rte_eth_devices[port_id];
2375
2376         if (dev->data->dev_conf.intr_conf.lsc &&
2377             dev->data->dev_started)
2378                 rte_eth_linkstatus_get(dev, eth_link);
2379         else {
2380                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2381                 (*dev->dev_ops->link_update)(dev, 0);
2382                 *eth_link = dev->data->dev_link;
2383         }
2384
2385         return 0;
2386 }
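
/*
 * Note on the two getters above: when link-state interrupts
 * (intr_conf.lsc) are enabled on a started port, both return the cached
 * link state; otherwise they query the driver, rte_eth_link_get()
 * waiting for completion (wait_to_complete=1) and
 * rte_eth_link_get_nowait() returning immediately (wait_to_complete=0).
 */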
2387
2388 int
2389 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
2390 {
2391         struct rte_eth_dev *dev;
2392
2393         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2394
2395         dev = &rte_eth_devices[port_id];
2396         memset(stats, 0, sizeof(*stats));
2397
2398         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
2399         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
2400         return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
2401 }
2402
2403 int
2404 rte_eth_stats_reset(uint16_t port_id)
2405 {
2406         struct rte_eth_dev *dev;
2407         int ret;
2408
2409         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2410         dev = &rte_eth_devices[port_id];
2411
2412         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
2413         ret = (*dev->dev_ops->stats_reset)(dev);
2414         if (ret != 0)
2415                 return eth_err(port_id, ret);
2416
2417         dev->data->rx_mbuf_alloc_failed = 0;
2418
2419         return 0;
2420 }
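
/*
 * Illustrative delta measurement built on the two calls above; the one
 * second interval and the fields printed are assumptions:
 *
 *	struct rte_eth_stats st;
 *
 *	rte_eth_stats_reset(port_id);
 *	rte_delay_ms(1000);
 *	if (rte_eth_stats_get(port_id, &st) == 0)
 *		printf("rx=%" PRIu64 " missed=%" PRIu64 "\n",
 *		       st.ipackets, st.imissed);
 */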
2421
2422 static inline int
2423 get_xstats_basic_count(struct rte_eth_dev *dev)
2424 {
2425         uint16_t nb_rxqs, nb_txqs;
2426         int count;
2427
2428         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2429         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2430
2431         count = RTE_NB_STATS;
2432         count += nb_rxqs * RTE_NB_RXQ_STATS;
2433         count += nb_txqs * RTE_NB_TXQ_STATS;
2434
2435         return count;
2436 }
2437
2438 static int
2439 get_xstats_count(uint16_t port_id)
2440 {
2441         struct rte_eth_dev *dev;
2442         int count;
2443
2444         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2445         dev = &rte_eth_devices[port_id];
2446         if (dev->dev_ops->xstats_get_names_by_id != NULL) {
2447                 count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
2448                                 NULL, 0);
2449                 if (count < 0)
2450                         return eth_err(port_id, count);
2451         }
2452         if (dev->dev_ops->xstats_get_names != NULL) {
2453                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
2454                 if (count < 0)
2455                         return eth_err(port_id, count);
2456         } else
2457                 count = 0;
2458
2460         count += get_xstats_basic_count(dev);
2461
2462         return count;
2463 }
2464
2465 int
2466 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
2467                 uint64_t *id)
2468 {
2469         int cnt_xstats, idx_xstat;
2470
2471         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2472
2473         if (!id) {
2474                 RTE_ETHDEV_LOG(ERR, "Id pointer is NULL\n");
2475                 return -EINVAL;
2476         }
2477
2478         if (!xstat_name) {
2479                 RTE_ETHDEV_LOG(ERR, "xstat_name pointer is NULL\n");
2480                 return -EINVAL;
2481         }
2482
2483         /* Get count */
2484         cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
2485         if (cnt_xstats < 0) {
2486                 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n");
2487                 return -ENODEV;
2488         }
2489
2490         /* Get id-name lookup table */
2491         struct rte_eth_xstat_name xstats_names[cnt_xstats];
2492
2493         if (cnt_xstats != rte_eth_xstats_get_names_by_id(
2494                         port_id, xstats_names, cnt_xstats, NULL)) {
2495                 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n");
2496                 return -1;
2497         }
2498
2499         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
2500                 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
2501                         *id = idx_xstat;
2502                         return 0;
2503                 }
2504         }
2505
2506         return -EINVAL;
2507 }
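
/*
 * Fetching one extended statistic by name, combining the lookup above
 * with rte_eth_xstats_get_by_id(); "rx_good_packets" is one of the
 * generic names registered at the top of this file:
 *
 *	uint64_t id, value;
 *
 *	if (rte_eth_xstats_get_id_by_name(port_id, "rx_good_packets",
 *					  &id) == 0 &&
 *	    rte_eth_xstats_get_by_id(port_id, &id, &value, 1) == 1)
 *		printf("rx_good_packets = %" PRIu64 "\n", value);
 */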
2508
2509 /* retrieve basic stats names */
2510 static int
2511 rte_eth_basic_stats_get_names(struct rte_eth_dev *dev,
2512         struct rte_eth_xstat_name *xstats_names)
2513 {
2514         int cnt_used_entries = 0;
2515         uint32_t idx, id_queue;
2516         uint16_t num_q;
2517
2518         for (idx = 0; idx < RTE_NB_STATS; idx++) {
2519                 strlcpy(xstats_names[cnt_used_entries].name,
2520                         rte_stats_strings[idx].name,
2521                         sizeof(xstats_names[0].name));
2522                 cnt_used_entries++;
2523         }
2524         num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2525         for (id_queue = 0; id_queue < num_q; id_queue++) {
2526                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
2527                         snprintf(xstats_names[cnt_used_entries].name,
2528                                 sizeof(xstats_names[0].name),
2529                                 "rx_q%u%s",
2530                                 id_queue, rte_rxq_stats_strings[idx].name);
2531                         cnt_used_entries++;
2532                 }
2533
2534         }
2535         num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2536         for (id_queue = 0; id_queue < num_q; id_queue++) {
2537                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
2538                         snprintf(xstats_names[cnt_used_entries].name,
2539                                 sizeof(xstats_names[0].name),
2540                                 "tx_q%u%s",
2541                                 id_queue, rte_txq_stats_strings[idx].name);
2542                         cnt_used_entries++;
2543                 }
2544         }
2545         return cnt_used_entries;
2546 }
2547
2548 /* retrieve ethdev extended statistics names */
2549 int
2550 rte_eth_xstats_get_names_by_id(uint16_t port_id,
2551         struct rte_eth_xstat_name *xstats_names, unsigned int size,
2552         uint64_t *ids)
2553 {
2554         struct rte_eth_xstat_name *xstats_names_copy;
2555         unsigned int no_basic_stat_requested = 1;
2556         unsigned int no_ext_stat_requested = 1;
2557         unsigned int expected_entries;
2558         unsigned int basic_count;
2559         struct rte_eth_dev *dev;
2560         unsigned int i;
2561         int ret;
2562
2563         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2564         dev = &rte_eth_devices[port_id];
2565
2566         basic_count = get_xstats_basic_count(dev);
2567         ret = get_xstats_count(port_id);
2568         if (ret < 0)
2569                 return ret;
2570         expected_entries = (unsigned int)ret;
2571
2572         /* Return max number of stats if no ids given */
2573         if (!ids) {
2574                 if (!xstats_names)
2575                         return expected_entries;
2576                 else if (xstats_names && size < expected_entries)
2577                         return expected_entries;
2578         }
2579
2580         if (ids && !xstats_names)
2581                 return -EINVAL;
2582
2583         if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
2584                 uint64_t ids_copy[size];
2585
2586                 for (i = 0; i < size; i++) {
2587                         if (ids[i] < basic_count) {
2588                                 no_basic_stat_requested = 0;
2589                                 break;
2590                         }
2591
2592                         /*
2593                          * Convert ids to the xstats ids the PMD knows: user
2594                          * ids cover basic stats first, then extended stats.
2595                          */
2596                         ids_copy[i] = ids[i] - basic_count;
2597                 }
2598
2599                 if (no_basic_stat_requested)
2600                         return (*dev->dev_ops->xstats_get_names_by_id)(dev,
2601                                         xstats_names, ids_copy, size);
2602         }
2603
2604         /* Retrieve all stats */
2605         if (!ids) {
2606                 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
2607                                 expected_entries);
2608                 if (num_stats < 0 || num_stats > (int)expected_entries)
2609                         return num_stats;
2610                 else
2611                         return expected_entries;
2612         }
2613
2614         xstats_names_copy = calloc(expected_entries,
2615                 sizeof(struct rte_eth_xstat_name));
2616
2617         if (!xstats_names_copy) {
2618                 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n");
2619                 return -ENOMEM;
2620         }
2621
2622         if (ids) {
2623                 for (i = 0; i < size; i++) {
2624                         if (ids[i] >= basic_count) {
2625                                 no_ext_stat_requested = 0;
2626                                 break;
2627                         }
2628                 }
2629         }
2630
2631         /* Fill xstats_names_copy structure */
2632         if (ids && no_ext_stat_requested) {
2633                 rte_eth_basic_stats_get_names(dev, xstats_names_copy);
2634         } else {
2635                 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
2636                         expected_entries);
2637                 if (ret < 0) {
2638                         free(xstats_names_copy);
2639                         return ret;
2640                 }
2641         }
2642
2643         /* Filter stats */
2644         for (i = 0; i < size; i++) {
2645                 if (ids[i] >= expected_entries) {
2646                         RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
2647                         free(xstats_names_copy);
2648                         return -1;
2649                 }
2650                 xstats_names[i] = xstats_names_copy[ids[i]];
2651         }
2652
2653         free(xstats_names_copy);
2654         return size;
2655 }
2656
2657 int
2658 rte_eth_xstats_get_names(uint16_t port_id,
2659         struct rte_eth_xstat_name *xstats_names,
2660         unsigned int size)
2661 {
2662         struct rte_eth_dev *dev;
2663         int cnt_used_entries;
2664         int cnt_expected_entries;
2665         int cnt_driver_entries;
2666
2667         cnt_expected_entries = get_xstats_count(port_id);
2668         if (xstats_names == NULL || cnt_expected_entries < 0 ||
2669                         (int)size < cnt_expected_entries)
2670                 return cnt_expected_entries;
2671
2672         /* port_id checked in get_xstats_count() */
2673         dev = &rte_eth_devices[port_id];
2674
2675         cnt_used_entries = rte_eth_basic_stats_get_names(
2676                 dev, xstats_names);
2677
2678         if (dev->dev_ops->xstats_get_names != NULL) {
2679                 /* If there are any driver-specific xstats, append them
2680                  * to end of list.
2681                  */
2682                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
2683                         dev,
2684                         xstats_names + cnt_used_entries,
2685                         size - cnt_used_entries);
2686                 if (cnt_driver_entries < 0)
2687                         return eth_err(port_id, cnt_driver_entries);
2688                 cnt_used_entries += cnt_driver_entries;
2689         }
2690
2691         return cnt_used_entries;
2692 }
2693
2695 static int
2696 rte_eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
2697 {
2698         struct rte_eth_dev *dev;
2699         struct rte_eth_stats eth_stats;
2700         unsigned int count = 0, i, q;
2701         uint64_t val, *stats_ptr;
2702         uint16_t nb_rxqs, nb_txqs;
2703         int ret;
2704
2705         ret = rte_eth_stats_get(port_id, &eth_stats);
2706         if (ret < 0)
2707                 return ret;
2708
2709         dev = &rte_eth_devices[port_id];
2710
2711         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2712         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2713
2714         /* global stats */
2715         for (i = 0; i < RTE_NB_STATS; i++) {
2716                 stats_ptr = RTE_PTR_ADD(&eth_stats,
2717                                         rte_stats_strings[i].offset);
2718                 val = *stats_ptr;
2719                 xstats[count++].value = val;
2720         }
2721
2722         /* per-rxq stats */
2723         for (q = 0; q < nb_rxqs; q++) {
2724                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
2725                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2726                                         rte_rxq_stats_strings[i].offset +
2727                                         q * sizeof(uint64_t));
2728                         val = *stats_ptr;
2729                         xstats[count++].value = val;
2730                 }
2731         }
2732
2733         /* per-txq stats */
2734         for (q = 0; q < nb_txqs; q++) {
2735                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
2736                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2737                                         rte_txq_stats_strings[i].offset +
2738                                         q * sizeof(uint64_t));
2739                         val = *stats_ptr;
2740                         xstats[count++].value = val;
2741                 }
2742         }
2743         return count;
2744 }
2745
2746 /* retrieve ethdev extended statistics */
2747 int
2748 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
2749                          uint64_t *values, unsigned int size)
2750 {
2751         unsigned int no_basic_stat_requested = 1;
2752         unsigned int no_ext_stat_requested = 1;
2753         unsigned int num_xstats_filled;
2754         unsigned int basic_count;
2755         uint16_t expected_entries;
2756         struct rte_eth_dev *dev;
2757         unsigned int i;
2758         int ret;
2759
2760         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2761         ret = get_xstats_count(port_id);
2762         if (ret < 0)
2763                 return ret;
2764         expected_entries = (uint16_t)ret;
2765         struct rte_eth_xstat xstats[expected_entries];
2766         dev = &rte_eth_devices[port_id];
2767         basic_count = get_xstats_basic_count(dev);
2768
2769         /* Return max number of stats if no ids given */
2770         if (!ids) {
2771                 if (!values)
2772                         return expected_entries;
2773                 else if (values && size < expected_entries)
2774                         return expected_entries;
2775         }
2776
2777         if (ids && !values)
2778                 return -EINVAL;
2779
2780         if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
2782                 uint64_t ids_copy[size];
2783
2784                 for (i = 0; i < size; i++) {
2785                         if (ids[i] < basic_count) {
2786                                 no_basic_stat_requested = 0;
2787                                 break;
2788                         }
2789
2790                         /*
2791                          * Convert ids to the xstats ids the PMD knows: user
2792                          * ids cover basic stats first, then extended stats.
2793                          */
2794                         ids_copy[i] = ids[i] - basic_count;
2795                 }
2796
2797                 if (no_basic_stat_requested)
2798                         return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
2799                                         values, size);
2800         }
2801
2802         if (ids) {
2803                 for (i = 0; i < size; i++) {
2804                         if (ids[i] >= basic_count) {
2805                                 no_ext_stat_requested = 0;
2806                                 break;
2807                         }
2808                 }
2809         }
2810
2811         /* Fill the xstats structure */
2812         if (ids && no_ext_stat_requested)
2813                 ret = rte_eth_basic_stats_get(port_id, xstats);
2814         else
2815                 ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
2816
2817         if (ret < 0)
2818                 return ret;
2819         num_xstats_filled = (unsigned int)ret;
2820
2821         /* Return all stats */
2822         if (!ids) {
2823                 for (i = 0; i < num_xstats_filled; i++)
2824                         values[i] = xstats[i].value;
2825                 return expected_entries;
2826         }
2827
2828         /* Filter stats */
2829         for (i = 0; i < size; i++) {
2830                 if (ids[i] >= expected_entries) {
2831                         RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
2832                         return -1;
2833                 }
2834                 values[i] = xstats[ids[i]].value;
2835         }
2836         return size;
2837 }
2838
2839 int
2840 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
2841         unsigned int n)
2842 {
2843         struct rte_eth_dev *dev;
2844         unsigned int count = 0, i;
2845         signed int xcount = 0;
2846         uint16_t nb_rxqs, nb_txqs;
2847         int ret;
2848
2849         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2850
2851         dev = &rte_eth_devices[port_id];
2852
2853         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2854         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2855
2856         /* Return generic statistics */
2857         count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
2858                 (nb_txqs * RTE_NB_TXQ_STATS);
2859
2860         /* implemented by the driver */
2861         if (dev->dev_ops->xstats_get != NULL) {
2862                 /* Retrieve the xstats from the driver at the end of the
2863                  * xstats struct.
2864                  */
2865                 xcount = (*dev->dev_ops->xstats_get)(dev,
2866                                      xstats ? xstats + count : NULL,
2867                                      (n > count) ? n - count : 0);
2868
2869                 if (xcount < 0)
2870                         return eth_err(port_id, xcount);
2871         }
2872
2873         if (n < count + xcount || xstats == NULL)
2874                 return count + xcount;
2875
2876         /* now fill the xstats structure */
2877         ret = rte_eth_basic_stats_get(port_id, xstats);
2878         if (ret < 0)
2879                 return ret;
2880         count = ret;
2881
2882         for (i = 0; i < count; i++)
2883                 xstats[i].id = i;
2884         /* add an offset to driver-specific stats */
2885         for ( ; i < count + xcount; i++)
2886                 xstats[i].id += count;
2887
2888         return count + xcount;
2889 }
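
/*
 * The usual two-call pattern for dumping every xstat: query the count
 * with NULL buffers, allocate, then fetch names and values (allocation
 * failure handling elided):
 *
 *	int i, n = rte_eth_xstats_get(port_id, NULL, 0);
 *	struct rte_eth_xstat *vals = malloc(n * sizeof(*vals));
 *	struct rte_eth_xstat_name *names = malloc(n * sizeof(*names));
 *
 *	rte_eth_xstats_get_names(port_id, names, n);
 *	rte_eth_xstats_get(port_id, vals, n);
 *	for (i = 0; i < n; i++)
 *		printf("%s: %" PRIu64 "\n",
 *		       names[vals[i].id].name, vals[i].value);
 */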
2890
2891 /* reset ethdev extended statistics */
2892 int
2893 rte_eth_xstats_reset(uint16_t port_id)
2894 {
2895         struct rte_eth_dev *dev;
2896
2897         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2898         dev = &rte_eth_devices[port_id];
2899
2900         /* implemented by the driver */
2901         if (dev->dev_ops->xstats_reset != NULL)
2902                 return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev));
2903
2904         /* fallback to default */
2905         return rte_eth_stats_reset(port_id);
2906 }
2907
2908 static int
2909 set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx,
2910                 uint8_t is_rx)
2911 {
2912         struct rte_eth_dev *dev;
2913
2914         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2915
2916         dev = &rte_eth_devices[port_id];
2917
2918         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
2919
2920         if (is_rx && (queue_id >= dev->data->nb_rx_queues))
2921                 return -EINVAL;
2922
2923         if (!is_rx && (queue_id >= dev->data->nb_tx_queues))
2924                 return -EINVAL;
2925
2926         if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
2927                 return -EINVAL;
2928
2929         return (*dev->dev_ops->queue_stats_mapping_set)
2930                         (dev, queue_id, stat_idx, is_rx);
2931 }
2932
2933
2934 int
2935 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
2936                 uint8_t stat_idx)
2937 {
2938         return eth_err(port_id, set_queue_stats_mapping(port_id, tx_queue_id,
2939                                                 stat_idx, STAT_QMAP_TX));
2940 }
2941
2942
2943 int
2944 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
2945                 uint8_t stat_idx)
2946 {
2947         return eth_err(port_id, set_queue_stats_mapping(port_id, rx_queue_id,
2948                                                 stat_idx, STAT_QMAP_RX));
2949 }
2950
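/*
 * Usage sketch (illustrative, not part of this file): on NICs that have
 * fewer per-queue counters than queues, an application can steer a queue
 * of interest onto one of the RTE_ETHDEV_QUEUE_STAT_CNTRS slots:
 *
 *	int ret = rte_eth_dev_set_tx_queue_stats_mapping(port_id, 3, 0);
 *
 * On success, TX queue 3 is then reported through q_opackets[0] and
 * q_obytes[0] in struct rte_eth_stats. Many PMDs return -ENOTSUP here,
 * which usually means every queue already has a dedicated counter.
 */
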
2951 int
2952 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
2953 {
2954         struct rte_eth_dev *dev;
2955
2956         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2957         dev = &rte_eth_devices[port_id];
2958
2959         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
2960         return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
2961                                                         fw_version, fw_size));
2962 }
2963
2964 int
2965 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
2966 {
2967         struct rte_eth_dev *dev;
2968         const struct rte_eth_desc_lim lim = {
2969                 .nb_max = UINT16_MAX,
2970                 .nb_min = 0,
2971                 .nb_align = 1,
2972                 .nb_seg_max = UINT16_MAX,
2973                 .nb_mtu_seg_max = UINT16_MAX,
2974         };
2975         int diag;
2976
2977         /*
2978          * Init dev_info before the port_id check: a caller ignoring the
2979          * return status cannot otherwise tell whether the get succeeded.
2980          */
2981         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
2982         dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
2983
2984         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2985         dev = &rte_eth_devices[port_id];
2986
2987         dev_info->rx_desc_lim = lim;
2988         dev_info->tx_desc_lim = lim;
2989         dev_info->device = dev->device;
2990         dev_info->min_mtu = RTE_ETHER_MIN_MTU;
2991         dev_info->max_mtu = UINT16_MAX;
2992
2993         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
2994         diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info);
2995         if (diag != 0) {
2996                 /* Cleanup already filled in device information */
2997                 memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
2998                 return eth_err(port_id, diag);
2999         }
3000
3001         /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */
3002         dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues,
3003                         RTE_MAX_QUEUES_PER_PORT);
3004         dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues,
3005                         RTE_MAX_QUEUES_PER_PORT);
3006
3007         dev_info->driver_name = dev->device->driver->name;
3008         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
3009         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
3010
3011         dev_info->dev_flags = &dev->data->dev_flags;
3012
3013         return 0;
3014 }
3015
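/*
 * Usage sketch (illustrative, not part of this file): a typical caller
 * clamps its requested queue counts to the reported limits before
 * rte_eth_dev_configure():
 *
 *	struct rte_eth_dev_info info;
 *	uint16_t nb_rxq = 8, nb_txq = 8;
 *
 *	if (rte_eth_dev_info_get(port_id, &info) != 0)
 *		return -1;
 *	nb_rxq = RTE_MIN(nb_rxq, info.max_rx_queues);
 *	nb_txq = RTE_MIN(nb_txq, info.max_tx_queues);
 *
 * The starting point of 8 queues is arbitrary; the clamp mirrors the
 * RTE_MAX_QUEUES_PER_PORT clamp applied above.
 */
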
3016 int
3017 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3018                                  uint32_t *ptypes, int num)
3019 {
3020         int i, j;
3021         struct rte_eth_dev *dev;
3022         const uint32_t *all_ptypes;
3023
3024         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3025         dev = &rte_eth_devices[port_id];
3026         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
3027         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3028
3029         if (!all_ptypes)
3030                 return 0;
3031
3032         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
3033                 if (all_ptypes[i] & ptype_mask) {
3034                         if (j < num)
3035                                 ptypes[j] = all_ptypes[i];
3036                         j++;
3037                 }
3038
3039         return j;
3040 }
3041
3042 int
3043 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3044                                  uint32_t *set_ptypes, unsigned int num)
3045 {
3046         const uint32_t valid_ptype_masks[] = {
3047                 RTE_PTYPE_L2_MASK,
3048                 RTE_PTYPE_L3_MASK,
3049                 RTE_PTYPE_L4_MASK,
3050                 RTE_PTYPE_TUNNEL_MASK,
3051                 RTE_PTYPE_INNER_L2_MASK,
3052                 RTE_PTYPE_INNER_L3_MASK,
3053                 RTE_PTYPE_INNER_L4_MASK,
3054         };
3055         const uint32_t *all_ptypes;
3056         struct rte_eth_dev *dev;
3057         uint32_t unused_mask;
3058         unsigned int i, j;
3059         int ret;
3060
3061         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3062         dev = &rte_eth_devices[port_id];
3063
3064         if (num > 0 && set_ptypes == NULL)
3065                 return -EINVAL;
3066
3067         if (*dev->dev_ops->dev_supported_ptypes_get == NULL ||
3068                         *dev->dev_ops->dev_ptypes_set == NULL) {
3069                 ret = 0;
3070                 goto ptype_unknown;
3071         }
3072
3073         if (ptype_mask == 0) {
3074                 ret = (*dev->dev_ops->dev_ptypes_set)(dev,
3075                                 ptype_mask);
3076                 goto ptype_unknown;
3077         }
3078
3079         unused_mask = ptype_mask;
3080         for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) {
3081                 uint32_t mask = ptype_mask & valid_ptype_masks[i];
3082                 if (mask && mask != valid_ptype_masks[i]) {
3083                         ret = -EINVAL;
3084                         goto ptype_unknown;
3085                 }
3086                 unused_mask &= ~valid_ptype_masks[i];
3087         }
3088
3089         if (unused_mask) {
3090                 ret = -EINVAL;
3091                 goto ptype_unknown;
3092         }
3093
3094         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3095         if (all_ptypes == NULL) {
3096                 ret = 0;
3097                 goto ptype_unknown;
3098         }
3099
3100         /*
3101          * Accommodate as many set_ptypes as possible. If the supplied
3102          * set_ptypes array is too small, fill it partially.
3103          */
3104         for (i = 0, j = 0; set_ptypes != NULL &&
3105                                 (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) {
3106                 if (ptype_mask & all_ptypes[i]) {
3107                         if (num > 0 && j < num - 1) { /* num == 0 would underflow */
3108                                 set_ptypes[j] = all_ptypes[i];
3109                                 j++;
3110                                 continue;
3111                         }
3112                         break;
3113                 }
3114         }
3115
3116         if (set_ptypes != NULL && j < num)
3117                 set_ptypes[j] = RTE_PTYPE_UNKNOWN;
3118
3119         return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask);
3120
3121 ptype_unknown:
3122         if (num > 0)
3123                 set_ptypes[0] = RTE_PTYPE_UNKNOWN;
3124
3125         return ret;
3126 }
3127
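/*
 * Usage sketch (illustrative, not part of this file): ptype_mask must be
 * built from whole groups (enforced by the valid_ptype_masks check
 * above), so a caller keeping only L3/L4 classification would write:
 *
 *	uint32_t kept[16];
 *	int ret;
 *
 *	ret = rte_eth_dev_set_ptypes(port_id,
 *			RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK,
 *			kept, RTE_DIM(kept));
 *
 * On success "kept" holds the ptypes the driver will still report,
 * terminated by RTE_PTYPE_UNKNOWN. Passing ptype_mask == 0 disables
 * ptype parsing entirely.
 */
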
3128 int
3129 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
3130 {
3131         struct rte_eth_dev *dev;
3132
3133         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3134         dev = &rte_eth_devices[port_id];
3135         rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
3136
3137         return 0;
3138 }
3139
3140 int
3141 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
3142 {
3143         struct rte_eth_dev *dev;
3144
3145         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3146
3147         dev = &rte_eth_devices[port_id];
3148         *mtu = dev->data->mtu;
3149         return 0;
3150 }
3151
3152 int
3153 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
3154 {
3155         int ret;
3156         struct rte_eth_dev_info dev_info;
3157         struct rte_eth_dev *dev;
3158
3159         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3160         dev = &rte_eth_devices[port_id];
3161         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
3162
3163         /*
3164          * Check if the device supports dev_infos_get; if it does not,
3165          * skip the min_mtu/max_mtu validation here, since it needs
3166          * values populated by rte_eth_dev_info_get(), which in turn
3167          * relies on dev->dev_ops->dev_infos_get.
3168          */
3169         if (*dev->dev_ops->dev_infos_get != NULL) {
3170                 ret = rte_eth_dev_info_get(port_id, &dev_info);
3171                 if (ret != 0)
3172                         return ret;
3173
3174                 if (mtu < dev_info.min_mtu || mtu > dev_info.max_mtu)
3175                         return -EINVAL;
3176         }
3177
3178         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
3179         if (!ret)
3180                 dev->data->mtu = mtu;
3181
3182         return eth_err(port_id, ret);
3183 }
3184
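/*
 * Usage sketch (illustrative, not part of this file): mirroring the range
 * check above in the application, e.g. for jumbo frames:
 *
 *	struct rte_eth_dev_info info;
 *	uint16_t mtu = 9000;
 *	int ret;
 *
 *	ret = rte_eth_dev_info_get(port_id, &info);
 *	if (ret == 0 && mtu >= info.min_mtu && mtu <= info.max_mtu)
 *		ret = rte_eth_dev_set_mtu(port_id, mtu);
 *
 * The 9000-byte value is an arbitrary jumbo example; the port typically
 * also needs DEV_RX_OFFLOAD_JUMBO_FRAME and a matching max_rx_pkt_len at
 * configure time.
 */
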
3185 int
3186 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
3187 {
3188         struct rte_eth_dev *dev;
3189         int ret;
3190
3191         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3192         dev = &rte_eth_devices[port_id];
3193         if (!(dev->data->dev_conf.rxmode.offloads &
3194               DEV_RX_OFFLOAD_VLAN_FILTER)) {
3195                 RTE_ETHDEV_LOG(ERR, "Port %u: vlan-filtering disabled\n",
3196                         port_id);
3197                 return -ENOSYS;
3198         }
3199
3200         if (vlan_id > 4095) {
3201                 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n",
3202                         port_id, vlan_id);
3203                 return -EINVAL;
3204         }
3205         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
3206
3207         ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
3208         if (ret == 0) {
3209                 struct rte_vlan_filter_conf *vfc;
3210                 int vidx;
3211                 int vbit;
3212
3213                 vfc = &dev->data->vlan_filter_conf;
3214                 vidx = vlan_id / 64;
3215                 vbit = vlan_id % 64;
3216
3217                 if (on)
3218                         vfc->ids[vidx] |= UINT64_C(1) << vbit;
3219                 else
3220                         vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
3221         }
3222
3223         return eth_err(port_id, ret);
3224 }
3225
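/*
 * Usage sketch (illustrative, not part of this file): the filter table is
 * only consulted when the offload was requested at configure time, which
 * is what the -ENOSYS check above enforces:
 *
 *	struct rte_eth_conf conf = { 0 };
 *	int ret;
 *
 *	conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
 *	ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *	...
 *	ret = rte_eth_dev_vlan_filter(port_id, 100, 1);
 *
 * This accepts VLAN 100; on most NICs other tagged traffic is dropped
 * once at least one filter entry is programmed. "nb_rxq"/"nb_txq" are
 * the application's queue counts.
 */
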
3226 int
3227 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3228                                     int on)
3229 {
3230         struct rte_eth_dev *dev;
3231
3232         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3233         dev = &rte_eth_devices[port_id];
3234         if (rx_queue_id >= dev->data->nb_rx_queues) {
3235                 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id);
3236                 return -EINVAL;
3237         }
3238
3239         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
3240         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
3241
3242         return 0;
3243 }
3244
3245 int
3246 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
3247                                 enum rte_vlan_type vlan_type,
3248                                 uint16_t tpid)
3249 {
3250         struct rte_eth_dev *dev;
3251
3252         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3253         dev = &rte_eth_devices[port_id];
3254         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
3255
3256         return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
3257                                                                tpid));
3258 }
3259
3260 int
3261 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
3262 {
3263         struct rte_eth_dev *dev;
3264         int ret = 0;
3265         int mask = 0;
3266         int cur, org = 0;
3267         uint64_t orig_offloads;
3268         uint64_t dev_offloads;
3269
3270         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3271         dev = &rte_eth_devices[port_id];
3272
3273         /* save original values in case of failure */
3274         orig_offloads = dev->data->dev_conf.rxmode.offloads;
3275         dev_offloads = orig_offloads;
3276
3277         /* check which option changed by application */
3278         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
3279         org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
3280         if (cur != org) {
3281                 if (cur)
3282                         dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
3283                 else
3284                         dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
3285                 mask |= ETH_VLAN_STRIP_MASK;
3286         }
3287
3288         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
3289         org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
3290         if (cur != org) {
3291                 if (cur)
3292                         dev_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3293                 else
3294                         dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
3295                 mask |= ETH_VLAN_FILTER_MASK;
3296         }
3297
3298         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
3299         org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND);
3300         if (cur != org) {
3301                 if (cur)
3302                         dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
3303                 else
3304                         dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
3305                 mask |= ETH_VLAN_EXTEND_MASK;
3306         }
3307
3308         cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD);
3309         org = !!(dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP);
3310         if (cur != org) {
3311                 if (cur)
3312                         dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
3313                 else
3314                         dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
3315                 mask |= ETH_QINQ_STRIP_MASK;
3316         }
3317
3318         /* no change */
3319         if (mask == 0)
3320                 return ret;
3321
3322         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
3323         dev->data->dev_conf.rxmode.offloads = dev_offloads;
3324         ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
3325         if (ret) {
3326                 /* hit an error, restore original values */
3327                 dev->data->dev_conf.rxmode.offloads = orig_offloads;
3328         }
3329
3330         return eth_err(port_id, ret);
3331 }
3332
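/*
 * Usage sketch (illustrative, not part of this file): the mask is
 * absolute, not incremental, so the usual pattern is read-modify-write
 * via the getter below:
 *
 *	int mask = rte_eth_dev_get_vlan_offload(port_id);
 *	int ret;
 *
 *	mask |= ETH_VLAN_STRIP_OFFLOAD;
 *	mask &= ~ETH_VLAN_FILTER_OFFLOAD;
 *	ret = rte_eth_dev_set_vlan_offload(port_id, mask);
 *
 * Only the option groups that differ from the current configuration are
 * pushed to the driver, per the cur/org comparison above.
 */
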
3333 int
3334 rte_eth_dev_get_vlan_offload(uint16_t port_id)
3335 {
3336         struct rte_eth_dev *dev;
3337         uint64_t *dev_offloads;
3338         int ret = 0;
3339
3340         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3341         dev = &rte_eth_devices[port_id];
3342         dev_offloads = &dev->data->dev_conf.rxmode.offloads;
3343
3344         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
3345                 ret |= ETH_VLAN_STRIP_OFFLOAD;
3346
3347         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
3348                 ret |= ETH_VLAN_FILTER_OFFLOAD;
3349
3350         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
3351                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
3352
3353         if (*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
3354                 ret |= ETH_QINQ_STRIP_OFFLOAD;
3355
3356         return ret;
3357 }
3358
3359 int
3360 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
3361 {
3362         struct rte_eth_dev *dev;
3363
3364         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3365         dev = &rte_eth_devices[port_id];
3366         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
3367
3368         return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
3369 }
3370
3371 int
3372 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3373 {
3374         struct rte_eth_dev *dev;
3375
3376         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3377         dev = &rte_eth_devices[port_id];
3378         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
3379         memset(fc_conf, 0, sizeof(*fc_conf));
3380         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
3381 }
3382
3383 int
3384 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3385 {
3386         struct rte_eth_dev *dev;
3387
3388         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3389         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
3390                 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n");
3391                 return -EINVAL;
3392         }
3393
3394         dev = &rte_eth_devices[port_id];
3395         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
3396         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
3397 }
3398
3399 int
3400 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
3401                                    struct rte_eth_pfc_conf *pfc_conf)
3402 {
3403         struct rte_eth_dev *dev;
3404
3405         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3406         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
3407                 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
3408                 return -EINVAL;
3409         }
3410
3411         dev = &rte_eth_devices[port_id];
3412         /* High/low watermark validation is device-specific */
3413         if (*dev->dev_ops->priority_flow_ctrl_set)
3414                 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
3415                                         (dev, pfc_conf));
3416         return -ENOTSUP;
3417 }
3418
3419 static int
3420 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
3421                         uint16_t reta_size)
3422 {
3423         uint16_t i, num;
3424
3425         if (!reta_conf)
3426                 return -EINVAL;
3427
3428         num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
3429         for (i = 0; i < num; i++) {
3430                 if (reta_conf[i].mask)
3431                         return 0;
3432         }
3433
3434         return -EINVAL;
3435 }
3436
3437 static int
3438 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
3439                          uint16_t reta_size,
3440                          uint16_t max_rxq)
3441 {
3442         uint16_t i, idx, shift;
3443
3444         if (!reta_conf)
3445                 return -EINVAL;
3446
3447         if (max_rxq == 0) {
3448                 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n");
3449                 return -EINVAL;
3450         }
3451
3452         for (i = 0; i < reta_size; i++) {
3453                 idx = i / RTE_RETA_GROUP_SIZE;
3454                 shift = i % RTE_RETA_GROUP_SIZE;
3455                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
3456                         (reta_conf[idx].reta[shift] >= max_rxq)) {
3457                         RTE_ETHDEV_LOG(ERR,
3458                                 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n",
3459                                 idx, shift,
3460                                 reta_conf[idx].reta[shift], max_rxq);
3461                         return -EINVAL;
3462                 }
3463         }
3464
3465         return 0;
3466 }
3467
3468 int
3469 rte_eth_dev_rss_reta_update(uint16_t port_id,
3470                             struct rte_eth_rss_reta_entry64 *reta_conf,
3471                             uint16_t reta_size)
3472 {
3473         struct rte_eth_dev *dev;
3474         int ret;
3475
3476         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3477         /* Check mask bits */
3478         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
3479         if (ret < 0)
3480                 return ret;
3481
3482         dev = &rte_eth_devices[port_id];
3483
3484         /* Check entry value */
3485         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
3486                                 dev->data->nb_rx_queues);
3487         if (ret < 0)
3488                 return ret;
3489
3490         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
3491         return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
3492                                                              reta_size));
3493 }
3494
3495 int
3496 rte_eth_dev_rss_reta_query(uint16_t port_id,
3497                            struct rte_eth_rss_reta_entry64 *reta_conf,
3498                            uint16_t reta_size)
3499 {
3500         struct rte_eth_dev *dev;
3501         int ret;
3502
3503         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3504
3505         /* Check mask bits */
3506         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
3507         if (ret < 0)
3508                 return ret;
3509
3510         dev = &rte_eth_devices[port_id];
3511         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
3512         return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
3513                                                             reta_size));
3514 }
3515
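/*
 * Usage sketch (illustrative, not part of this file): spreading the
 * redirection table evenly across the configured RX queues, assuming the
 * device's reta_size fits in the 128 entries provided here.
 * "nb_rx_queues" is the application's configured RX queue count:
 *
 *	struct rte_eth_rss_reta_entry64 reta[2];
 *	struct rte_eth_dev_info info;
 *	uint16_t i;
 *	int ret;
 *
 *	rte_eth_dev_info_get(port_id, &info);
 *	memset(reta, 0, sizeof(reta));
 *	for (i = 0; i < info.reta_size; i++) {
 *		reta[i / RTE_RETA_GROUP_SIZE].mask |=
 *				1ULL << (i % RTE_RETA_GROUP_SIZE);
 *		reta[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *				i % nb_rx_queues;
 *	}
 *	ret = rte_eth_dev_rss_reta_update(port_id, reta, info.reta_size);
 */
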
3516 int
3517 rte_eth_dev_rss_hash_update(uint16_t port_id,
3518                             struct rte_eth_rss_conf *rss_conf)
3519 {
3520         struct rte_eth_dev *dev;
3521         struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
3522         int ret;
3523
3524         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3525
3526         ret = rte_eth_dev_info_get(port_id, &dev_info);
3527         if (ret != 0)
3528                 return ret;
3529
3530         rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf);
3531
3532         dev = &rte_eth_devices[port_id];
3533         if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
3534             dev_info.flow_type_rss_offloads) {
3535                 RTE_ETHDEV_LOG(ERR,
3536                         "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
3537                         port_id, rss_conf->rss_hf,
3538                         dev_info.flow_type_rss_offloads);
3539                 return -EINVAL;
3540         }
3541         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
3542         return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
3543                                                                  rss_conf));
3544 }
3545
3546 int
3547 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
3548                               struct rte_eth_rss_conf *rss_conf)
3549 {
3550         struct rte_eth_dev *dev;
3551
3552         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3553         dev = &rte_eth_devices[port_id];
3554         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
3555         return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
3556                                                                    rss_conf));
3557 }
3558
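/*
 * Usage sketch (illustrative, not part of this file): extending the
 * current hash functions without replacing the key. rss_key == NULL asks
 * the driver not to copy the key out on get, and to keep it on update:
 *
 *	struct rte_eth_rss_conf conf = { .rss_key = NULL };
 *	int ret;
 *
 *	if (rte_eth_dev_rss_hash_conf_get(port_id, &conf) == 0) {
 *		conf.rss_hf |= ETH_RSS_UDP;
 *		ret = rte_eth_dev_rss_hash_update(port_id, &conf);
 *	}
 *
 * The update path above rejects any rss_hf bit outside
 * dev_info.flow_type_rss_offloads with -EINVAL.
 */
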
3559 int
3560 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
3561                                 struct rte_eth_udp_tunnel *udp_tunnel)
3562 {
3563         struct rte_eth_dev *dev;
3564
3565         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3566         if (udp_tunnel == NULL) {
3567                 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
3568                 return -EINVAL;
3569         }
3570
3571         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
3572                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
3573                 return -EINVAL;
3574         }
3575
3576         dev = &rte_eth_devices[port_id];
3577         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
3578         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
3579                                                                 udp_tunnel));
3580 }
3581
3582 int
3583 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
3584                                    struct rte_eth_udp_tunnel *udp_tunnel)
3585 {
3586         struct rte_eth_dev *dev;
3587
3588         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3589         dev = &rte_eth_devices[port_id];
3590
3591         if (udp_tunnel == NULL) {
3592                 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
3593                 return -EINVAL;
3594         }
3595
3596         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
3597                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
3598                 return -EINVAL;
3599         }
3600
3601         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
3602         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
3603                                                                 udp_tunnel));
3604 }
3605
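/*
 * Usage sketch (illustrative, not part of this file): telling the NIC
 * which UDP destination port carries VXLAN so it can parse the inner
 * headers (for ptypes, inner RSS, etc.):
 *
 *	struct rte_eth_udp_tunnel tunnel = {
 *		.udp_port = 4789,
 *		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *	};
 *	int ret;
 *
 *	ret = rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
 *
 * 4789 is the IANA-assigned VXLAN port; remove it again with
 * rte_eth_dev_udp_tunnel_port_delete() when tearing down.
 */
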
3606 int
3607 rte_eth_led_on(uint16_t port_id)
3608 {
3609         struct rte_eth_dev *dev;
3610
3611         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3612         dev = &rte_eth_devices[port_id];
3613         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
3614         return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
3615 }
3616
3617 int
3618 rte_eth_led_off(uint16_t port_id)
3619 {
3620         struct rte_eth_dev *dev;
3621
3622         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3623         dev = &rte_eth_devices[port_id];
3624         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
3625         return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
3626 }
3627
3628 /*
3629  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
3630  * an empty spot.
3631  */
3632 static int
3633 get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
3634 {
3635         struct rte_eth_dev_info dev_info;
3636         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3637         unsigned i;
3638         int ret;
3639
3640         ret = rte_eth_dev_info_get(port_id, &dev_info);
3641         if (ret != 0)
3642                 return -1;
3643
3644         for (i = 0; i < dev_info.max_mac_addrs; i++)
3645                 if (memcmp(addr, &dev->data->mac_addrs[i],
3646                                 RTE_ETHER_ADDR_LEN) == 0)
3647                         return i;
3648
3649         return -1;
3650 }
3651
3652 static const struct rte_ether_addr null_mac_addr;
3653
3654 int
3655 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
3656                         uint32_t pool)
3657 {
3658         struct rte_eth_dev *dev;
3659         int index;
3660         uint64_t pool_mask;
3661         int ret;
3662
3663         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3664         dev = &rte_eth_devices[port_id];
3665         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
3666
3667         if (rte_is_zero_ether_addr(addr)) {
3668                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
3669                         port_id);
3670                 return -EINVAL;
3671         }
3672         if (pool >= ETH_64_POOLS) {
3673                 RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", ETH_64_POOLS - 1);
3674                 return -EINVAL;
3675         }
3676
3677         index = get_mac_addr_index(port_id, addr);
3678         if (index < 0) {
3679                 index = get_mac_addr_index(port_id, &null_mac_addr);
3680                 if (index < 0) {
3681                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
3682                                 port_id);
3683                         return -ENOSPC;
3684                 }
3685         } else {
3686                 pool_mask = dev->data->mac_pool_sel[index];
3687
3688                 /* If both the MAC address and pool are already there, do nothing */
3689                 if (pool_mask & (1ULL << pool))
3690                         return 0;
3691         }
3692
3693         /* Update NIC */
3694         ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
3695
3696         if (ret == 0) {
3697                 /* Update address in NIC data structure */
3698                 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]);
3699
3700                 /* Update pool bitmap in NIC data structure */
3701                 dev->data->mac_pool_sel[index] |= (1ULL << pool);
3702         }
3703
3704         return eth_err(port_id, ret);
3705 }
3706
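/*
 * Usage sketch (illustrative, not part of this file): adding a secondary
 * unicast address so the NIC accepts it in addition to the default one.
 * The locally-administered address below is a made-up example:
 *
 *	struct rte_ether_addr extra = {
 *		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 *	};
 *	int ret;
 *
 *	ret = rte_eth_dev_mac_addr_add(port_id, &extra, 0);
 *
 * The trailing 0 is the VMDq pool index; on a port without VMDq pools,
 * pool 0 is the conventional value.
 */
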
3707 int
3708 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr)
3709 {
3710         struct rte_eth_dev *dev;
3711         int index;
3712
3713         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3714         dev = &rte_eth_devices[port_id];
3715         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
3716
3717         index = get_mac_addr_index(port_id, addr);
3718         if (index == 0) {
3719                 RTE_ETHDEV_LOG(ERR,
3720                         "Port %u: Cannot remove default MAC address\n",
3721                         port_id);
3722                 return -EADDRINUSE;
3723         } else if (index < 0)
3724                 return 0;  /* Do nothing if address wasn't found */
3725
3726         /* Update NIC */
3727         (*dev->dev_ops->mac_addr_remove)(dev, index);
3728
3729         /* Update address in NIC data structure */
3730         rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
3731
3732         /* reset pool bitmap */
3733         dev->data->mac_pool_sel[index] = 0;
3734
3735         return 0;
3736 }
3737
3738 int
3739 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr)
3740 {
3741         struct rte_eth_dev *dev;
3742         int ret;
3743
3744         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3745
3746         if (!rte_is_valid_assigned_ether_addr(addr))
3747                 return -EINVAL;
3748
3749         dev = &rte_eth_devices[port_id];
3750         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
3751
3752         ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
3753         if (ret < 0)
3754                 return ret;
3755
3756         /* Update default address in NIC data structure */
3757         rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
3758
3759         return 0;
3760 }
3761
3762
3763 /*
3764  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
3765  * an empty spot.
3766  */
3767 static int
3768 get_hash_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
3769 {
3770         struct rte_eth_dev_info dev_info;
3771         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3772         unsigned i;
3773         int ret;
3774
3775         ret = rte_eth_dev_info_get(port_id, &dev_info);
3776         if (ret != 0)
3777                 return -1;
3778
3779         if (!dev->data->hash_mac_addrs)
3780                 return -1;
3781
3782         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
3783                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
3784                         RTE_ETHER_ADDR_LEN) == 0)
3785                         return i;
3786
3787         return -1;
3788 }
3789
3790 int
3791 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
3792                                 uint8_t on)
3793 {
3794         int index;
3795         int ret;
3796         struct rte_eth_dev *dev;
3797
3798         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3799
3800         dev = &rte_eth_devices[port_id];
3801         if (rte_is_zero_ether_addr(addr)) {
3802                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
3803                         port_id);
3804                 return -EINVAL;
3805         }
3806
3807         index = get_hash_mac_addr_index(port_id, addr);
3808         /* If it's already there, do nothing */
3809         if ((index >= 0) && on)
3810                 return 0;
3811
3812         if (index < 0) {
3813                 if (!on) {
3814                         RTE_ETHDEV_LOG(ERR,
3815                                 "Port %u: the MAC address was not set in UTA\n",
3816                                 port_id);
3817                         return -EINVAL;
3818                 }
3819
3820                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
3821                 if (index < 0) {
3822                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
3823                                 port_id);
3824                         return -ENOSPC;
3825                 }
3826         }
3827
3828         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
3829         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
3830         if (ret == 0) {
3831                 /* Update address in NIC data structure */
3832                 if (on)
3833                         rte_ether_addr_copy(addr,
3834                                         &dev->data->hash_mac_addrs[index]);
3835                 else
3836                         rte_ether_addr_copy(&null_mac_addr,
3837                                         &dev->data->hash_mac_addrs[index]);
3838         }
3839
3840         return eth_err(port_id, ret);
3841 }
3842
3843 int
3844 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
3845 {
3846         struct rte_eth_dev *dev;
3847
3848         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3849
3850         dev = &rte_eth_devices[port_id];
3851
3852         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
3853         return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
3854                                                                        on));
3855 }
3856
3857 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
3858                                         uint16_t tx_rate)
3859 {
3860         struct rte_eth_dev *dev;
3861         struct rte_eth_dev_info dev_info;
3862         struct rte_eth_link link;
3863         int ret;
3864
3865         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3866
3867         ret = rte_eth_dev_info_get(port_id, &dev_info);
3868         if (ret != 0)
3869                 return ret;
3870
3871         dev = &rte_eth_devices[port_id];
3872         link = dev->data->dev_link;
3873
3874         if (queue_idx >= dev_info.max_tx_queues) { /* indexes are zero-based */
3875                 RTE_ETHDEV_LOG(ERR,
3876                         "Set queue rate limit: port %u: invalid queue id=%u\n",
3877                         port_id, queue_idx);
3878                 return -EINVAL;
3879         }
3880
3881         if (tx_rate > link.link_speed) {
3882                 RTE_ETHDEV_LOG(ERR,
3883                         "Set queue rate limit: invalid tx_rate=%u, bigger than link speed=%u\n",
3884                         tx_rate, link.link_speed);
3885                 return -EINVAL;
3886         }
3887
3888         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
3889         return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
3890                                                         queue_idx, tx_rate));
3891 }
3892
3893 int
3894 rte_eth_mirror_rule_set(uint16_t port_id,
3895                         struct rte_eth_mirror_conf *mirror_conf,
3896                         uint8_t rule_id, uint8_t on)
3897 {
3898         struct rte_eth_dev *dev;
3899
3900         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3901         if (mirror_conf->rule_type == 0) {
3902                 RTE_ETHDEV_LOG(ERR, "Mirror rule type can not be 0\n");
3903                 return -EINVAL;
3904         }
3905
3906         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
3907                 RTE_ETHDEV_LOG(ERR, "Invalid dst pool, pool id must be 0-%d\n",
3908                         ETH_64_POOLS - 1);
3909                 return -EINVAL;
3910         }
3911
3912         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
3913              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
3914             (mirror_conf->pool_mask == 0)) {
3915                 RTE_ETHDEV_LOG(ERR,
3916                         "Invalid mirror pool, pool mask can not be 0\n");
3917                 return -EINVAL;
3918         }
3919
3920         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
3921             mirror_conf->vlan.vlan_mask == 0) {
3922                 RTE_ETHDEV_LOG(ERR,
3923                         "Invalid vlan mask, vlan mask can not be 0\n");
3924                 return -EINVAL;
3925         }
3926
3927         dev = &rte_eth_devices[port_id];
3928         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
3929
3930         return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
3931                                                 mirror_conf, rule_id, on));
3932 }
3933
3934 int
3935 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
3936 {
3937         struct rte_eth_dev *dev;
3938
3939         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3940
3941         dev = &rte_eth_devices[port_id];
3942         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
3943
3944         return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev,
3945                                                                    rule_id));
3946 }
3947
3948 RTE_INIT(eth_dev_init_cb_lists)
3949 {
3950         int i;
3951
3952         for (i = 0; i < RTE_MAX_ETHPORTS; i++)
3953                 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
3954 }
3955
3956 int
3957 rte_eth_dev_callback_register(uint16_t port_id,
3958                         enum rte_eth_event_type event,
3959                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3960 {
3961         struct rte_eth_dev *dev;
3962         struct rte_eth_dev_callback *user_cb;
3963         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3964         uint16_t last_port;
3965
3966         if (!cb_fn)
3967                 return -EINVAL;
3968
3969         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3970                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
3971                 return -EINVAL;
3972         }
3973
3974         if (port_id == RTE_ETH_ALL) {
3975                 next_port = 0;
3976                 last_port = RTE_MAX_ETHPORTS - 1;
3977         } else {
3978                 next_port = last_port = port_id;
3979         }
3980
3981         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3982
3983         do {
3984                 dev = &rte_eth_devices[next_port];
3985
3986                 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
3987                         if (user_cb->cb_fn == cb_fn &&
3988                                 user_cb->cb_arg == cb_arg &&
3989                                 user_cb->event == event) {
3990                                 break;
3991                         }
3992                 }
3993
3994                 /* create a new callback. */
3995                 if (user_cb == NULL) {
3996                         user_cb = rte_zmalloc("INTR_USER_CALLBACK",
3997                                 sizeof(struct rte_eth_dev_callback), 0);
3998                         if (user_cb != NULL) {
3999                                 user_cb->cb_fn = cb_fn;
4000                                 user_cb->cb_arg = cb_arg;
4001                                 user_cb->event = event;
4002                                 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
4003                                                   user_cb, next);
4004                         } else {
4005                                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
4006                                 rte_eth_dev_callback_unregister(port_id, event,
4007                                                                 cb_fn, cb_arg);
4008                                 return -ENOMEM;
4009                         }
4010
4011                 }
4012         } while (++next_port <= last_port);
4013
4014         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
4015         return 0;
4016 }
4017
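/*
 * Usage sketch (illustrative, not part of this file): registering a
 * link-status callback on every port. "on_link_change" is a hypothetical
 * application function matching rte_eth_dev_cb_fn:
 *
 *	static int
 *	on_link_change(uint16_t port_id, enum rte_eth_event_type event,
 *			void *cb_arg, void *ret_param)
 *	{
 *		RTE_SET_USED(event);
 *		RTE_SET_USED(cb_arg);
 *		RTE_SET_USED(ret_param);
 *		printf("port %u: link state changed\n", port_id);
 *		return 0;
 *	}
 *
 *	ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
 *			RTE_ETH_EVENT_INTR_LSC, on_link_change, NULL);
 *
 * Unregistering with cb_arg == (void *)-1 removes the callback for any
 * cb_arg value, as handled in the unregister loop below.
 */
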
4018 int
4019 rte_eth_dev_callback_unregister(uint16_t port_id,
4020                         enum rte_eth_event_type event,
4021                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4022 {
4023         int ret;
4024         struct rte_eth_dev *dev;
4025         struct rte_eth_dev_callback *cb, *next;
4026         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
4027         uint16_t last_port;
4028
4029         if (!cb_fn)
4030                 return -EINVAL;
4031
4032         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4033                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
4034                 return -EINVAL;
4035         }
4036
4037         if (port_id == RTE_ETH_ALL) {
4038                 next_port = 0;
4039                 last_port = RTE_MAX_ETHPORTS - 1;
4040         } else {
4041                 next_port = last_port = port_id;
4042         }
4043
4044         rte_spinlock_lock(&rte_eth_dev_cb_lock);
4045
4046         do {
4047                 dev = &rte_eth_devices[next_port];
4048                 ret = 0;
4049                 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
4050                      cb = next) {
4051
4052                         next = TAILQ_NEXT(cb, next);
4053
4054                         if (cb->cb_fn != cb_fn || cb->event != event ||
4055                             (cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
4056                                 continue;
4057
4058                         /*
4059                          * if this callback is not executing right now,
4060                          * then remove it.
4061                          */
4062                         if (cb->active == 0) {
4063                                 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
4064                                 rte_free(cb);
4065                         } else {
4066                                 ret = -EAGAIN;
4067                         }
4068                 }
4069         } while (++next_port <= last_port);
4070
4071         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
4072         return ret;
4073 }
4074
4075 int
4076 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
4077         enum rte_eth_event_type event, void *ret_param)
4078 {
4079         struct rte_eth_dev_callback *cb_lst;
4080         struct rte_eth_dev_callback dev_cb;
4081         int rc = 0;
4082
4083         rte_spinlock_lock(&rte_eth_dev_cb_lock);
4084         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
4085                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
4086                         continue;
4087                 dev_cb = *cb_lst;
4088                 cb_lst->active = 1;
4089                 if (ret_param != NULL)
4090                         dev_cb.ret_param = ret_param;
4091
4092                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
4093                 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
4094                                 dev_cb.cb_arg, dev_cb.ret_param);
4095                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
4096                 cb_lst->active = 0;
4097         }
4098         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
4099         return rc;
4100 }
4101
4102 void
4103 rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
4104 {
4105         if (dev == NULL)
4106                 return;
4107
4108         _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);
4109
4110         dev->state = RTE_ETH_DEV_ATTACHED;
4111 }
4112
4113 int
4114 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
4115 {
4116         uint32_t vec;
4117         struct rte_eth_dev *dev;
4118         struct rte_intr_handle *intr_handle;
4119         uint16_t qid;
4120         int rc;
4121
4122         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4123
4124         dev = &rte_eth_devices[port_id];
4125
4126         if (!dev->intr_handle) {
4127                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4128                 return -ENOTSUP;
4129         }
4130
4131         intr_handle = dev->intr_handle;
4132         if (!intr_handle->intr_vec) {
4133                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4134                 return -EPERM;
4135         }
4136
4137         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
4138                 vec = intr_handle->intr_vec[qid];
4139                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4140                 if (rc && rc != -EEXIST) {
4141                         RTE_ETHDEV_LOG(ERR,
4142                                 "p %u q %u rx ctl error op %d epfd %d vec %u\n",
4143                                 port_id, qid, op, epfd, vec);
4144                 }
4145         }
4146
4147         return 0;
4148 }
4149
4150 int
4151 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
4152 {
4153         struct rte_intr_handle *intr_handle;
4154         struct rte_eth_dev *dev;
4155         unsigned int efd_idx;
4156         uint32_t vec;
4157         int fd;
4158
4159         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
4160
4161         dev = &rte_eth_devices[port_id];
4162
4163         if (queue_id >= dev->data->nb_rx_queues) {
4164                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4165                 return -1;
4166         }
4167
4168         if (!dev->intr_handle) {
4169                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4170                 return -1;
4171         }
4172
4173         intr_handle = dev->intr_handle;
4174         if (!intr_handle->intr_vec) {
4175                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4176                 return -1;
4177         }
4178
4179         vec = intr_handle->intr_vec[queue_id];
4180         efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
4181                 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
4182         fd = intr_handle->efds[efd_idx];
4183
4184         return fd;
4185 }
4186
4187 const struct rte_memzone *
4188 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
4189                          uint16_t queue_id, size_t size, unsigned align,
4190                          int socket_id)
4191 {
4192         char z_name[RTE_MEMZONE_NAMESIZE];
4193         const struct rte_memzone *mz;
4194         int rc;
4195
4196         rc = snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s",
4197                       dev->data->port_id, queue_id, ring_name);
4198         if (rc >= RTE_MEMZONE_NAMESIZE) {
4199                 RTE_ETHDEV_LOG(ERR, "ring name too long\n");
4200                 rte_errno = ENAMETOOLONG;
4201                 return NULL;
4202         }
4203
4204         mz = rte_memzone_lookup(z_name);
4205         if (mz)
4206                 return mz;
4207
4208         return rte_memzone_reserve_aligned(z_name, size, socket_id,
4209                         RTE_MEMZONE_IOVA_CONTIG, align);
4210 }
4211
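/*
 * Usage sketch (illustrative, not part of this file): PMDs call this from
 * their queue-setup ops to get (or re-find after a restart) descriptor
 * ring memory. "ring_size", "queue_idx" and "socket_id" stand for
 * whatever the driver computed:
 *
 *	const struct rte_memzone *mz;
 *
 *	mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
 *			ring_size, RTE_CACHE_LINE_SIZE, socket_id);
 *	if (mz == NULL)
 *		return -ENOMEM;
 *
 * Note the lookup path returns an existing zone as-is; callers relying on
 * a specific size or alignment should keep those constant across
 * reconfigurations.
 */
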
4212 int
4213 rte_eth_dev_create(struct rte_device *device, const char *name,
4214         size_t priv_data_size,
4215         ethdev_bus_specific_init ethdev_bus_specific_init,
4216         void *bus_init_params,
4217         ethdev_init_t ethdev_init, void *init_params)
4218 {
4219         struct rte_eth_dev *ethdev;
4220         int retval;
4221
4222         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL);
4223
4224         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
4225                 ethdev = rte_eth_dev_allocate(name);
4226                 if (!ethdev)
4227                         return -ENODEV;
4228
4229                 if (priv_data_size) {
4230                         ethdev->data->dev_private = rte_zmalloc_socket(
4231                                 name, priv_data_size, RTE_CACHE_LINE_SIZE,
4232                                 device->numa_node);
4233
4234                         if (!ethdev->data->dev_private) {
4235                                 RTE_LOG(ERR, EAL, "failed to allocate private data\n");
4236                                 retval = -ENOMEM;
4237                                 goto probe_failed;
4238                         }
4239                 }
4240         } else {
4241                 ethdev = rte_eth_dev_attach_secondary(name);
4242                 if (!ethdev) {
4243                         RTE_LOG(ERR, EAL, "secondary process attach failed, "
4244                                 "ethdev doesn't exist\n");
4245                         return -ENODEV;
4246                 }
4247         }
4248
4249         ethdev->device = device;
4250
4251         if (ethdev_bus_specific_init) {
4252                 retval = ethdev_bus_specific_init(ethdev, bus_init_params);
4253                 if (retval) {
4254                         RTE_LOG(ERR, EAL,
4255                                 "ethdev bus specific initialisation failed\n");
4256                         goto probe_failed;
4257                 }
4258         }
4259
4260         retval = ethdev_init(ethdev, init_params);
4261         if (retval) {
4262                 RTE_LOG(ERR, EAL, "ethdev initialisation failed\n");
4263                 goto probe_failed;
4264         }
4265
4266         rte_eth_dev_probing_finish(ethdev);
4267
4268         return retval;
4269
4270 probe_failed:
4271         rte_eth_dev_release_port(ethdev);
4272         return retval;
4273 }
4274
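/*
 * Usage sketch (illustrative, not part of this file): a bus probe
 * function in a PMD typically wraps ethdev allocation in this helper.
 * The "my_*" names and private struct are hypothetical:
 *
 *	static int
 *	my_ethdev_init(struct rte_eth_dev *ethdev, void *init_params)
 *	{
 *		RTE_SET_USED(init_params);
 *		ethdev->dev_ops = &my_dev_ops;
 *		return 0;
 *	}
 *
 *	static int
 *	my_probe(struct rte_device *device)
 *	{
 *		return rte_eth_dev_create(device, device->name,
 *				sizeof(struct my_priv),
 *				NULL, NULL,
 *				my_ethdev_init, NULL);
 *	}
 *
 * The two NULLs skip the bus-specific init step; dev_private is
 * allocated on device->numa_node in the primary process and looked up by
 * name in secondaries, as implemented above.
 */
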
4275 int
4276 rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
4277         ethdev_uninit_t ethdev_uninit)
4278 {
4279         int ret;
4280
4281         ethdev = rte_eth_dev_allocated(ethdev->data->name);
4282         if (!ethdev)
4283                 return -ENODEV;
4284
4285         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL);
4286
4287         ret = ethdev_uninit(ethdev);
4288         if (ret)
4289                 return ret;
4290
4291         return rte_eth_dev_release_port(ethdev);
4292 }
4293
4294 int
4295 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
4296                           int epfd, int op, void *data)
4297 {
4298         uint32_t vec;
4299         struct rte_eth_dev *dev;
4300         struct rte_intr_handle *intr_handle;
4301         int rc;
4302
4303         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4304
4305         dev = &rte_eth_devices[port_id];
4306         if (queue_id >= dev->data->nb_rx_queues) {
4307                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4308                 return -EINVAL;
4309         }
4310
4311         if (!dev->intr_handle) {
4312                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4313                 return -ENOTSUP;
4314         }
4315
4316         intr_handle = dev->intr_handle;
4317         if (!intr_handle->intr_vec) {
4318                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4319                 return -EPERM;
4320         }
4321
4322         vec = intr_handle->intr_vec[queue_id];
4323         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4324         if (rc && rc != -EEXIST) {
4325                 RTE_ETHDEV_LOG(ERR,
4326                         "p %u q %u rx ctl error op %d epfd %d vec %u\n",
4327                         port_id, queue_id, op, epfd, vec);
4328                 return rc;
4329         }
4330
4331         return 0;
4332 }
4333
4334 int
4335 rte_eth_dev_rx_intr_enable(uint16_t port_id,
4336                            uint16_t queue_id)
4337 {
4338         struct rte_eth_dev *dev;
4339
4340         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4341
4342         dev = &rte_eth_devices[port_id];
4343
4344         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
4345         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev,
4346                                                                 queue_id));
4347 }
4348
4349 int
4350 rte_eth_dev_rx_intr_disable(uint16_t port_id,
4351                             uint16_t queue_id)
4352 {
4353         struct rte_eth_dev *dev;
4354
4355         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4356
4357         dev = &rte_eth_devices[port_id];
4358
4359         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
4360         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev,
4361                                                                 queue_id));
4362 }
4363
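/*
 * Usage sketch (illustrative, not part of this file): the interrupt-mode
 * polling loop used e.g. by l3fwd-power. When a poll round comes back
 * empty, arm the interrupt, sleep in epoll, then disarm and resume busy
 * polling:
 *
 *	struct rte_epoll_event ev;
 *
 *	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id,
 *			RTE_EPOLL_PER_THREAD, RTE_INTR_EVENT_ADD, NULL);
 *	...
 *	rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, timeout_ms);
 *	rte_eth_dev_rx_intr_disable(port_id, queue_id);
 *
 * "timeout_ms" is application-chosen; -1 blocks until a packet arrives.
 */
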
4364
4365 int
4366 rte_eth_dev_filter_supported(uint16_t port_id,
4367                              enum rte_filter_type filter_type)
4368 {
4369         struct rte_eth_dev *dev;
4370
4371         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4372
4373         dev = &rte_eth_devices[port_id];
4374         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
4375         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
4376                                 RTE_ETH_FILTER_NOP, NULL);
4377 }
4378
4379 int
4380 rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
4381                         enum rte_filter_op filter_op, void *arg)
4382 {
4383         struct rte_eth_dev *dev;
4384
4385         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4386
4387         dev = &rte_eth_devices[port_id];
4388         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
4389         return eth_err(port_id, (*dev->dev_ops->filter_ctrl)(dev, filter_type,
4390                                                              filter_op, arg));
4391 }
4392
4393 const struct rte_eth_rxtx_callback *
4394 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
4395                 rte_rx_callback_fn fn, void *user_param)
4396 {
4397 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4398         rte_errno = ENOTSUP;
4399         return NULL;
4400 #endif
4401         struct rte_eth_dev *dev;
4402
4403         /* check input parameters */
4404         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4405                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4406                 rte_errno = EINVAL;
4407                 return NULL;
4408         }
4409         dev = &rte_eth_devices[port_id];
4410         if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
4411                 rte_errno = EINVAL;
4412                 return NULL;
4413         }
4414         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4415
4416         if (cb == NULL) {
4417                 rte_errno = ENOMEM;
4418                 return NULL;
4419         }
4420
4421         cb->fn.rx = fn;
4422         cb->param = user_param;
4423
4424         rte_spinlock_lock(&rte_eth_rx_cb_lock);
4425         /* Add the callbacks in fifo order. */
4426         struct rte_eth_rxtx_callback *tail =
4427                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
4428
4429         if (!tail) {
4430                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
4431
4432         } else {
4433                 while (tail->next)
4434                         tail = tail->next;
4435                 tail->next = cb;
4436         }
4437         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
4438
4439         return cb;
4440 }
4441
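/*
 * Usage sketch (illustrative, not part of this file): a per-queue RX
 * callback that counts received packets. "count_cb" is a hypothetical
 * function matching rte_rx_callback_fn:
 *
 *	static uint16_t
 *	count_cb(uint16_t port, uint16_t queue, struct rte_mbuf *pkts[],
 *		uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
 *	{
 *		uint64_t *counter = user_param;
 *
 *		RTE_SET_USED(port);
 *		RTE_SET_USED(queue);
 *		RTE_SET_USED(pkts);
 *		RTE_SET_USED(max_pkts);
 *		*counter += nb_pkts;
 *		return nb_pkts;
 *	}
 *
 *	static uint64_t counter;
 *	const struct rte_eth_rxtx_callback *cb =
 *		rte_eth_add_rx_callback(port_id, queue_id, count_cb, &counter);
 *
 * The callback runs inside rte_eth_rx_burst() on the caller's lcore and
 * must return the (possibly reduced) number of packets left in pkts[].
 */
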
4442 const struct rte_eth_rxtx_callback *
4443 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
4444                 rte_rx_callback_fn fn, void *user_param)
4445 {
4446 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4447         rte_errno = ENOTSUP;
4448         return NULL;
4449 #endif
4450         /* check input parameters */
4451         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4452                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4453                 rte_errno = EINVAL;
4454                 return NULL;
4455         }
4456
4457         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4458
4459         if (cb == NULL) {
4460                 rte_errno = ENOMEM;
4461                 return NULL;
4462         }
4463
4464         cb->fn.rx = fn;
4465         cb->param = user_param;
4466
4467         rte_spinlock_lock(&rte_eth_rx_cb_lock);
4468         /* Add the callback at the head of the list */
4469         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
4470         rte_smp_wmb();
4471         rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
4472         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
4473
4474         return cb;
4475 }
4476
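/*
 * Append a user callback to run before each rte_eth_tx_burst() on a
 * given queue. Like the Rx variant, callbacks chain in FIFO order under
 * rte_eth_tx_cb_lock; hairpin queues are rejected since no software
 * burst ever runs on them.
 */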
4477 const struct rte_eth_rxtx_callback *
4478 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
4479                 rte_tx_callback_fn fn, void *user_param)
4480 {
4481 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4482         rte_errno = ENOTSUP;
4483         return NULL;
4484 #endif
4485         struct rte_eth_dev *dev;
4486
4487         /* check input parameters */
4488         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4489                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
4490                 rte_errno = EINVAL;
4491                 return NULL;
4492         }
4493
4494         dev = &rte_eth_devices[port_id];
4495         if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
4496                 rte_errno = EINVAL;
4497                 return NULL;
4498         }
4499
4500         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4501
4502         if (cb == NULL) {
4503                 rte_errno = ENOMEM;
4504                 return NULL;
4505         }
4506
4507         cb->fn.tx = fn;
4508         cb->param = user_param;
4509
4510         rte_spinlock_lock(&rte_eth_tx_cb_lock);
4511         /* Add the callback at the tail, preserving FIFO order. */
4512         struct rte_eth_rxtx_callback *tail =
4513                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
4514
4515         if (!tail) {
4516                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
4518         } else {
4519                 while (tail->next)
4520                         tail = tail->next;
4521                 tail->next = cb;
4522         }
4523         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
4524
4525         return cb;
4526 }
4527
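/*
 * Unlink a callback previously returned by one of the add functions
 * (the Tx variant below is identical). Note the callback memory is not
 * freed here: a data-plane thread may still be traversing the list, so
 * the caller must make sure no Rx/Tx burst can reference the entry
 * before releasing it.
 */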
4528 int
4529 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
4530                 const struct rte_eth_rxtx_callback *user_cb)
4531 {
4532 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4533         return -ENOTSUP;
4534 #endif
4535         /* Check input parameters. */
4536         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
4537         if (user_cb == NULL ||
4538                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
4539                 return -EINVAL;
4540
4541         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4542         struct rte_eth_rxtx_callback *cb;
4543         struct rte_eth_rxtx_callback **prev_cb;
4544         int ret = -EINVAL;
4545
4546         rte_spinlock_lock(&rte_eth_rx_cb_lock);
4547         prev_cb = &dev->post_rx_burst_cbs[queue_id];
4548         for (; *prev_cb != NULL; prev_cb = &cb->next) {
4549                 cb = *prev_cb;
4550                 if (cb == user_cb) {
4551                         /* Remove the user cb from the callback list. */
4552                         *prev_cb = cb->next;
4553                         ret = 0;
4554                         break;
4555                 }
4556         }
4557         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
4558
4559         return ret;
4560 }
4561
4562 int
4563 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
4564                 const struct rte_eth_rxtx_callback *user_cb)
4565 {
4566 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4567         return -ENOTSUP;
4568 #endif
4569         /* Check input parameters. */
4570         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
4571         if (user_cb == NULL ||
4572                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
4573                 return -EINVAL;
4574
4575         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4576         int ret = -EINVAL;
4577         struct rte_eth_rxtx_callback *cb;
4578         struct rte_eth_rxtx_callback **prev_cb;
4579
4580         rte_spinlock_lock(&rte_eth_tx_cb_lock);
4581         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
4582         for (; *prev_cb != NULL; prev_cb = &cb->next) {
4583                 cb = *prev_cb;
4584                 if (cb == user_cb) {
4585                         /* Remove the user cb from the callback list. */
4586                         *prev_cb = cb->next;
4587                         ret = 0;
4588                         break;
4589                 }
4590         }
4591         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
4592
4593         return ret;
4594 }
4595
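/*
 * Fill *qinfo with the configuration of an Rx queue. Hairpin queues are
 * rejected because rte_eth_rxq_info describes a software-managed queue,
 * which a hairpin queue is not.
 */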
4596 int
4597 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4598         struct rte_eth_rxq_info *qinfo)
4599 {
4600         struct rte_eth_dev *dev;
4601
4602         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4603
4604         if (qinfo == NULL)
4605                 return -EINVAL;
4606
4607         dev = &rte_eth_devices[port_id];
4608         if (queue_id >= dev->data->nb_rx_queues) {
4609                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4610                 return -EINVAL;
4611         }
4612
4613         if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
4614                 RTE_ETHDEV_LOG(INFO,
4615                         "Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
4616                         queue_id, port_id);
4617                 return -EINVAL;
4618         }
4619
4620         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
4621
4622         memset(qinfo, 0, sizeof(*qinfo));
4623         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
4624         return 0;
4625 }
4626
4627 int
4628 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4629         struct rte_eth_txq_info *qinfo)
4630 {
4631         struct rte_eth_dev *dev;
4632
4633         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4634
4635         if (qinfo == NULL)
4636                 return -EINVAL;
4637
4638         dev = &rte_eth_devices[port_id];
4639         if (queue_id >= dev->data->nb_tx_queues) {
4640                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
4641                 return -EINVAL;
4642         }
4643
4644         if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
4645                 RTE_ETHDEV_LOG(INFO,
4646                         "Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
4647                         queue_id, port_id);
4648                 return -EINVAL;
4649         }
4650
4651         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
4652
4653         memset(qinfo, 0, sizeof(*qinfo));
4654         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
4655
4656         return 0;
4657 }
4658
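/*
 * Report which burst function variant a queue is using (for instance
 * whether a vector code path is active). The PMD fills *mode; -ENOTSUP
 * simply means the driver does not expose this information.
 */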
4659 int
4660 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
4661                           struct rte_eth_burst_mode *mode)
4662 {
4663         struct rte_eth_dev *dev;
4664
4665         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4666
4667         if (mode == NULL)
4668                 return -EINVAL;
4669
4670         dev = &rte_eth_devices[port_id];
4671
4672         if (queue_id >= dev->data->nb_rx_queues) {
4673                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4674                 return -EINVAL;
4675         }
4676
4677         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP);
4678         memset(mode, 0, sizeof(*mode));
4679         return eth_err(port_id,
4680                        dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode));
4681 }
4682
4683 int
4684 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
4685                           struct rte_eth_burst_mode *mode)
4686 {
4687         struct rte_eth_dev *dev;
4688
4689         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4690
4691         if (mode == NULL)
4692                 return -EINVAL;
4693
4694         dev = &rte_eth_devices[port_id];
4695
4696         if (queue_id >= dev->data->nb_tx_queues) {
4697                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
4698                 return -EINVAL;
4699         }
4700
4701         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP);
4702         memset(mode, 0, sizeof(*mode));
4703         return eth_err(port_id,
4704                        dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode));
4705 }
4706
4707 int
4708 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
4709                              struct rte_ether_addr *mc_addr_set,
4710                              uint32_t nb_mc_addr)
4711 {
4712         struct rte_eth_dev *dev;
4713
4714         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4715
4716         dev = &rte_eth_devices[port_id];
4717         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
4718         return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
4719                                                 mc_addr_set, nb_mc_addr));
4720 }
4721
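/*
 * The timesync_* wrappers below expose the PMD hooks used for IEEE
 * 1588/PTP support. A typical flow (sketch, assuming the PMD implements
 * every hook and delta_ns is computed by the application):
 *
 *	struct timespec ts;
 *
 *	rte_eth_timesync_enable(port_id);
 *	... wait for a frame flagged PKT_RX_IEEE1588_TMST ...
 *	rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0);
 *	rte_eth_timesync_adjust_time(port_id, delta_ns);
 */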
4722 int
4723 rte_eth_timesync_enable(uint16_t port_id)
4724 {
4725         struct rte_eth_dev *dev;
4726
4727         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4728         dev = &rte_eth_devices[port_id];
4729
4730         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
4731         return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
4732 }
4733
4734 int
4735 rte_eth_timesync_disable(uint16_t port_id)
4736 {
4737         struct rte_eth_dev *dev;
4738
4739         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4740         dev = &rte_eth_devices[port_id];
4741
4742         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
4743         return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
4744 }
4745
4746 int
4747 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
4748                                    uint32_t flags)
4749 {
4750         struct rte_eth_dev *dev;
4751
4752         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4753         dev = &rte_eth_devices[port_id];
4754
4755         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
4756         return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
4757                                 (dev, timestamp, flags));
4758 }
4759
4760 int
4761 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
4762                                    struct timespec *timestamp)
4763 {
4764         struct rte_eth_dev *dev;
4765
4766         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4767         dev = &rte_eth_devices[port_id];
4768
4769         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
4770         return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
4771                                 (dev, timestamp));
4772 }
4773
4774 int
4775 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
4776 {
4777         struct rte_eth_dev *dev;
4778
4779         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4780         dev = &rte_eth_devices[port_id];
4781
4782         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
4783         return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev,
4784                                                                       delta));
4785 }
4786
4787 int
4788 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
4789 {
4790         struct rte_eth_dev *dev;
4791
4792         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4793         dev = &rte_eth_devices[port_id];
4794
4795         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
4796         return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
4797                                                                 timestamp));
4798 }
4799
4800 int
4801 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
4802 {
4803         struct rte_eth_dev *dev;
4804
4805         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4806         dev = &rte_eth_devices[port_id];
4807
4808         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
4809         return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
4810                                                                 timestamp));
4811 }
4812
4813 int
4814 rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
4815 {
4816         struct rte_eth_dev *dev;
4817
4818         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4819         dev = &rte_eth_devices[port_id];
4820
4821         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP);
4822         return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock));
4823 }
4824
4825 int
4826 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
4827 {
4828         struct rte_eth_dev *dev;
4829
4830         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4831
4832         dev = &rte_eth_devices[port_id];
4833         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
4834         return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
4835 }
4836
4837 int
4838 rte_eth_dev_get_eeprom_length(uint16_t port_id)
4839 {
4840         struct rte_eth_dev *dev;
4841
4842         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4843
4844         dev = &rte_eth_devices[port_id];
4845         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
4846         return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
4847 }
4848
4849 int
4850 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
4851 {
4852         struct rte_eth_dev *dev;
4853
4854         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4855
4856         dev = &rte_eth_devices[port_id];
4857         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
4858         return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
4859 }
4860
4861 int
4862 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
4863 {
4864         struct rte_eth_dev *dev;
4865
4866         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4867
4868         dev = &rte_eth_devices[port_id];
4869         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
4870         return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
4871 }
4872
4873 int
4874 rte_eth_dev_get_module_info(uint16_t port_id,
4875                             struct rte_eth_dev_module_info *modinfo)
4876 {
4877         struct rte_eth_dev *dev;
4878
4879         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4880
4881         dev = &rte_eth_devices[port_id];
4882         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP);
4883         return (*dev->dev_ops->get_module_info)(dev, modinfo);
4884 }
4885
4886 int
4887 rte_eth_dev_get_module_eeprom(uint16_t port_id,
4888                               struct rte_dev_eeprom_info *info)
4889 {
4890         struct rte_eth_dev *dev;
4891
4892         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4893
4894         dev = &rte_eth_devices[port_id];
4895         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP);
4896         return (*dev->dev_ops->get_module_eeprom)(dev, info);
4897 }
4898
4899 int
4900 rte_eth_dev_get_dcb_info(uint16_t port_id,
4901                              struct rte_eth_dcb_info *dcb_info)
4902 {
4903         struct rte_eth_dev *dev;
4904
4905         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4906
4907         dev = &rte_eth_devices[port_id];
4908         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
4909
4910         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
4911         return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
4912 }
4913
4914 int
4915 rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
4916                                     struct rte_eth_l2_tunnel_conf *l2_tunnel)
4917 {
4918         struct rte_eth_dev *dev;
4919
4920         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4921         if (l2_tunnel == NULL) {
4922                 RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n");
4923                 return -EINVAL;
4924         }
4925
4926         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
4927                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4928                 return -EINVAL;
4929         }
4930
4931         dev = &rte_eth_devices[port_id];
4932         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
4933                                 -ENOTSUP);
4934         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev,
4935                                                                 l2_tunnel));
4936 }
4937
4938 int
4939 rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
4940                                   struct rte_eth_l2_tunnel_conf *l2_tunnel,
4941                                   uint32_t mask,
4942                                   uint8_t en)
4943 {
4944         struct rte_eth_dev *dev;
4945
4946         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4947
4948         if (l2_tunnel == NULL) {
4949                 RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n");
4950                 return -EINVAL;
4951         }
4952
4953         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
4954                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4955                 return -EINVAL;
4956         }
4957
4958         if (mask == 0) {
4959                 RTE_ETHDEV_LOG(ERR, "Mask should have a value\n");
4960                 return -EINVAL;
4961         }
4962
4963         dev = &rte_eth_devices[port_id];
4964         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
4965                                 -ENOTSUP);
4966         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_offload_set)(dev,
4967                                                         l2_tunnel, mask, en));
4968 }
4969
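/*
 * Clamp a requested descriptor count to the limits the PMD reports:
 * round up to the required alignment, cap at the maximum, then raise to
 * the minimum. For example, with nb_align = 32, nb_max = 4096 and
 * nb_min = 64, a request of 1000 becomes 1024.
 */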
4970 static void
4971 rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc,
4972                            const struct rte_eth_desc_lim *desc_lim)
4973 {
4974         if (desc_lim->nb_align != 0)
4975                 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
4976
4977         if (desc_lim->nb_max != 0)
4978                 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
4979
4980         *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
4981 }
4982
4983 int
4984 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
4985                                  uint16_t *nb_rx_desc,
4986                                  uint16_t *nb_tx_desc)
4987 {
4988         struct rte_eth_dev_info dev_info;
4989         int ret;
4990
4991         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4992
4993         ret = rte_eth_dev_info_get(port_id, &dev_info);
4994         if (ret != 0)
4995                 return ret;
4996
4997         if (nb_rx_desc != NULL)
4998                 rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
4999
5000         if (nb_tx_desc != NULL)
5001                 rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
5002
5003         return 0;
5004 }
5005
5006 int
5007 rte_eth_dev_hairpin_capability_get(uint16_t port_id,
5008                                    struct rte_eth_hairpin_cap *cap)
5009 {
5010         struct rte_eth_dev *dev;
5011
5012         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
5013
5014         dev = &rte_eth_devices[port_id];
5015         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP);
5016         memset(cap, 0, sizeof(*cap));
5017         return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap));
5018 }
5019
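/*
 * Internal helpers used across this file: a queue configured through
 * the hairpin setup path is flagged RTE_ETH_QUEUE_STATE_HAIRPIN in the
 * per-queue state array.
 */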
5020 int
5021 rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
5022 {
5023         if (dev->data->rx_queue_state[queue_id] ==
5024             RTE_ETH_QUEUE_STATE_HAIRPIN)
5025                 return 1;
5026         return 0;
5027 }
5028
5029 int
5030 rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
5031 {
5032         if (dev->data->tx_queue_state[queue_id] ==
5033             RTE_ETH_QUEUE_STATE_HAIRPIN)
5034                 return 1;
5035         return 0;
5036 }
5037
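/*
 * Ask the PMD whether it can work with mempools of the given ops name
 * (e.g. "ring_mp_mc" or "stack"). A driver that does not implement the
 * hook has no restriction, hence the early "all pools are supported"
 * return.
 */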
5038 int
5039 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
5040 {
5041         struct rte_eth_dev *dev;
5042
5043         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5044
5045         if (pool == NULL)
5046                 return -EINVAL;
5047
5048         dev = &rte_eth_devices[port_id];
5049
5050         if (*dev->dev_ops->pool_ops_supported == NULL)
5051                 return 1; /* all pools are supported */
5052
5053         return (*dev->dev_ops->pool_ops_supported)(dev, pool);
5054 }
5055
5056 /**
5057  * A set of values to describe the possible states of a switch domain.
5058  */
5059 enum rte_eth_switch_domain_state {
5060         RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
5061         RTE_ETH_SWITCH_DOMAIN_ALLOCATED
5062 };
5063
5064 /**
5065  * Array of switch domains available for allocation. Array is sized to
5066  * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
5067  * ethdev ports in a single process.
5068  */
5069 static struct rte_eth_dev_switch {
5070         enum rte_eth_switch_domain_state state;
5071 } rte_eth_switch_domains[RTE_MAX_ETHPORTS];
5072
5073 int
5074 rte_eth_switch_domain_alloc(uint16_t *domain_id)
5075 {
5076         unsigned int i;
5077
5078         *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
5079
5080         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
5081                 if (rte_eth_switch_domains[i].state ==
5082                         RTE_ETH_SWITCH_DOMAIN_UNUSED) {
5083                         rte_eth_switch_domains[i].state =
5084                                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
5085                         *domain_id = i;
5086                         return 0;
5087                 }
5088         }
5089
5090         return -ENOSPC;
5091 }
5092
5093 int
5094 rte_eth_switch_domain_free(uint16_t domain_id)
5095 {
5096         if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
5097                 domain_id >= RTE_MAX_ETHPORTS)
5098                 return -EINVAL;
5099
5100         if (rte_eth_switch_domains[domain_id].state !=
5101                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
5102                 return -EINVAL;
5103
5104         rte_eth_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;
5105
5106         return 0;
5107 }
5108
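/*
 * Split a devargs string of the form "key1=val1,key2=[v,w],..." into
 * rte_kvargs pairs, modifying the copied string in place. The parser is
 * a small state machine: state 0 expects a key, state 1 scans the key up
 * to '=', state 2 scans the value up to ',' or end of string, and
 * state 3 skips over a bracketed list so commas inside "[...]" do not
 * split the value.
 */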
5109 static int
5110 rte_eth_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
5111 {
5112         int state;
5113         struct rte_kvargs_pair *pair;
5114         char *letter;
5115
5116         arglist->str = strdup(str_in);
5117         if (arglist->str == NULL)
5118                 return -ENOMEM;
5119
5120         letter = arglist->str;
5121         state = 0;
5122         arglist->count = 0;
5123         pair = &arglist->pairs[0];
5124         while (1) {
5125                 switch (state) {
5126                 case 0: /* Initial */
5127                         if (*letter == '=')
5128                                 return -EINVAL;
5129                         else if (*letter == '\0')
5130                                 return 0;
5131
5132                         state = 1;
5133                         pair->key = letter;
5134                         /* fall-thru */
5135
5136                 case 1: /* Parsing key */
5137                         if (*letter == '=') {
5138                                 *letter = '\0';
5139                                 pair->value = letter + 1;
5140                                 state = 2;
5141                         } else if (*letter == ',' || *letter == '\0')
5142                                 return -EINVAL;
5143                         break;
5144
5146                 case 2: /* Parsing value */
5147                         if (*letter == '[')
5148                                 state = 3;
5149                         else if (*letter == ',') {
5150                                 *letter = '\0';
5151                                 arglist->count++;
5152                                 pair = &arglist->pairs[arglist->count];
5153                                 state = 0;
5154                         } else if (*letter == '\0') {
5155                                 letter--;
5156                                 arglist->count++;
5157                                 pair = &arglist->pairs[arglist->count];
5158                                 state = 0;
5159                         }
5160                         break;
5161
5162                 case 3: /* Parsing list */
5163                         if (*letter == ']')
5164                                 state = 2;
5165                         else if (*letter == '\0')
5166                                 return -EINVAL;
5167                         break;
5168                 }
5169                 letter++;
5170         }
5171 }
5172
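/*
 * Parse the ethdev-generic devargs. Only the "representor" key is
 * handled here; for example "representor=[0-3]" requests representor
 * ports 0 to 3. Unknown keys are ignored so that bus- or driver-specific
 * keys may share the same string.
 */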
5173 int
5174 rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
5175 {
5176         struct rte_kvargs args;
5177         struct rte_kvargs_pair *pair;
5178         unsigned int i;
5179         int result = 0;
5180
5181         memset(eth_da, 0, sizeof(*eth_da));
5182
5183         result = rte_eth_devargs_tokenise(&args, dargs);
5184         if (result < 0)
5185                 goto parse_cleanup;
5186
5187         for (i = 0; i < args.count; i++) {
5188                 pair = &args.pairs[i];
5189                 if (strcmp("representor", pair->key) == 0) {
5190                         result = rte_eth_devargs_parse_list(pair->value,
5191                                 rte_eth_devargs_parse_representor_ports,
5192                                 eth_da);
5193                         if (result < 0)
5194                                 goto parse_cleanup;
5195                 }
5196         }
5197
5198 parse_cleanup:
5199         free(args.str);
5201
5202         return result;
5203 }
5204
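/*
 * Telemetry callbacks. These run in the telemetry control thread when a
 * client queries one of the "/ethdev/..." commands registered in
 * ethdev_init_log() below, e.g. through usertools/dpdk-telemetry.py.
 */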
5205 #ifdef RTE_LIBRTE_TELEMETRY
5206 static int
5207 handle_port_list(const char *cmd __rte_unused,
5208                 const char *params __rte_unused,
5209                 struct rte_tel_data *d)
5210 {
5211         int port_id;
5212
5213         rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
5214         RTE_ETH_FOREACH_DEV(port_id)
5215                 rte_tel_data_add_array_int(d, port_id);
5216         return 0;
5217 }
5218
5219 static int
5220 handle_port_xstats(const char *cmd __rte_unused,
5221                 const char *params,
5222                 struct rte_tel_data *d)
5223 {
5224         struct rte_eth_xstat *eth_xstats;
5225         struct rte_eth_xstat_name *xstat_names;
5226         int port_id, num_xstats;
5227         int i, ret;
5228
5229         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5230                 return -1;
5231
5232         port_id = atoi(params);
5233         if (!rte_eth_dev_is_valid_port(port_id))
5234                 return -1;
5235
5236         num_xstats = rte_eth_xstats_get(port_id, NULL, 0);
5237         if (num_xstats < 0)
5238                 return -1;
5239
5240         /* use one malloc for both names and stats */
5241         eth_xstats = malloc((sizeof(struct rte_eth_xstat) +
5242                         sizeof(struct rte_eth_xstat_name)) * num_xstats);
5243         if (eth_xstats == NULL)
5244                 return -1;
5245         xstat_names = (void *)&eth_xstats[num_xstats];
5246
5247         ret = rte_eth_xstats_get_names(port_id, xstat_names, num_xstats);
5248         if (ret < 0 || ret > num_xstats) {
5249                 free(eth_xstats);
5250                 return -1;
5251         }
5252
5253         ret = rte_eth_xstats_get(port_id, eth_xstats, num_xstats);
5254         if (ret < 0 || ret > num_xstats) {
5255                 free(eth_xstats);
5256                 return -1;
5257         }
5258
5259         rte_tel_data_start_dict(d);
5260         for (i = 0; i < num_xstats; i++)
5261                 rte_tel_data_add_dict_u64(d, xstat_names[i].name,
5262                                 eth_xstats[i].value);
        free(eth_xstats);
5263         return 0;
5264 }
5265
5266 static int
5267 handle_port_link_status(const char *cmd __rte_unused,
5268                 const char *params,
5269                 struct rte_tel_data *d)
5270 {
5271         static const char *status_str = "status";
5272         int ret, port_id;
5273         struct rte_eth_link link;
5274
5275         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5276                 return -1;
5277
5278         port_id = atoi(params);
5279         if (!rte_eth_dev_is_valid_port(port_id))
5280                 return -1;
5281
5282         ret = rte_eth_link_get(port_id, &link);
5283         if (ret < 0)
5284                 return -1;
5285
5286         rte_tel_data_start_dict(d);
5287         if (!link.link_status) {
5288                 rte_tel_data_add_dict_string(d, status_str, "DOWN");
5289                 return 0;
5290         }
5291         rte_tel_data_add_dict_string(d, status_str, "UP");
5292         rte_tel_data_add_dict_u64(d, "speed", link.link_speed);
5293         rte_tel_data_add_dict_string(d, "duplex",
5294                         (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
5295                                 "full-duplex" : "half-duplex");
5296         return 0;
5297 }
5298 #endif
5299
5300 RTE_INIT(ethdev_init_log)
5301 {
5302         rte_eth_dev_logtype = rte_log_register("lib.ethdev");
5303         if (rte_eth_dev_logtype >= 0)
5304                 rte_log_set_level(rte_eth_dev_logtype, RTE_LOG_INFO);
5305 #ifdef RTE_LIBRTE_TELEMETRY
5306         rte_telemetry_register_cmd("/ethdev/list", handle_port_list,
5307                         "Returns list of available ethdev ports. Takes no parameters");
5308         rte_telemetry_register_cmd("/ethdev/xstats", handle_port_xstats,
5309                         "Returns the extended stats for a port. Parameters: int port_id");
5310         rte_telemetry_register_cmd("/ethdev/link_status",
5311                         handle_port_link_status,
5312                         "Returns the link status for a port. Parameters: int port_id");
5313 #endif
5314 }