ethdev: add maximum LRO packet size
[dpdk.git] / lib / librte_ethdev / rte_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4
5 #include <sys/types.h>
6 #include <sys/queue.h>
7 #include <ctype.h>
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <string.h>
11 #include <stdarg.h>
12 #include <errno.h>
13 #include <stdbool.h>
14 #include <stdint.h>
15 #include <inttypes.h>
16 #include <netinet/in.h>
17
18 #include <rte_byteorder.h>
19 #include <rte_log.h>
20 #include <rte_debug.h>
21 #include <rte_interrupts.h>
22 #include <rte_memory.h>
23 #include <rte_memcpy.h>
24 #include <rte_memzone.h>
25 #include <rte_launch.h>
26 #include <rte_eal.h>
27 #include <rte_per_lcore.h>
28 #include <rte_lcore.h>
29 #include <rte_atomic.h>
30 #include <rte_branch_prediction.h>
31 #include <rte_common.h>
32 #include <rte_mempool.h>
33 #include <rte_malloc.h>
34 #include <rte_mbuf.h>
35 #include <rte_errno.h>
36 #include <rte_spinlock.h>
37 #include <rte_string_fns.h>
38 #include <rte_kvargs.h>
39 #include <rte_class.h>
40 #include <rte_ether.h>
41
42 #include "rte_ethdev.h"
43 #include "rte_ethdev_driver.h"
44 #include "ethdev_profile.h"
45 #include "ethdev_private.h"
46
47 int rte_eth_dev_logtype;
48
49 static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
50 struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
51
52 /* spinlock for eth device callbacks */
53 static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
54
55 /* spinlock for add/remove rx callbacks */
56 static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;
57
58 /* spinlock for add/remove tx callbacks */
59 static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;
60
61 /* spinlock for shared data allocation */
62 static rte_spinlock_t rte_eth_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
63
64 /* store statistics names and their offsets in the stats structure */
65 struct rte_eth_xstats_name_off {
66         char name[RTE_ETH_XSTATS_NAME_SIZE];
67         unsigned offset;
68 };
69
70 /* Shared memory between primary and secondary processes. */
71 static struct {
72         uint64_t next_owner_id;
73         rte_spinlock_t ownership_lock;
74         struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
75 } *rte_eth_dev_shared_data;
76
77 static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
78         {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
79         {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
80         {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
81         {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
82         {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
83         {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
84         {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
85         {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
86                 rx_nombuf)},
87 };
88
89 #define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))
90
91 static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
92         {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
93         {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
94         {"errors", offsetof(struct rte_eth_stats, q_errors)},
95 };
96
97 #define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
98                 sizeof(rte_rxq_stats_strings[0]))
99
100 static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
101         {"packets", offsetof(struct rte_eth_stats, q_opackets)},
102         {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
103 };
104 #define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
105                 sizeof(rte_txq_stats_strings[0]))
106
107 #define RTE_RX_OFFLOAD_BIT2STR(_name)   \
108         { DEV_RX_OFFLOAD_##_name, #_name }
109
110 static const struct {
111         uint64_t offload;
112         const char *name;
113 } rte_rx_offload_names[] = {
114         RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
115         RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
116         RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
117         RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
118         RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
119         RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
120         RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
121         RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
122         RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
123         RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
124         RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
125         RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
126         RTE_RX_OFFLOAD_BIT2STR(SCATTER),
127         RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
128         RTE_RX_OFFLOAD_BIT2STR(SECURITY),
129         RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
130         RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
131         RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
132         RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
133 };
134
135 #undef RTE_RX_OFFLOAD_BIT2STR
136
137 #define RTE_TX_OFFLOAD_BIT2STR(_name)   \
138         { DEV_TX_OFFLOAD_##_name, #_name }
139
140 static const struct {
141         uint64_t offload;
142         const char *name;
143 } rte_tx_offload_names[] = {
144         RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
145         RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
146         RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
147         RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
148         RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
149         RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
150         RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
151         RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
152         RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
153         RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
154         RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
155         RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
156         RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
157         RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
158         RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
159         RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
160         RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
161         RTE_TX_OFFLOAD_BIT2STR(SECURITY),
162         RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
163         RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
164         RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
165 };
166
167 #undef RTE_TX_OFFLOAD_BIT2STR
168
169 /**
170  * The user application callback description.
171  *
172  * It contains the callback address to be registered by the user application,
173  * a pointer to the callback parameters, and the event type.
174  */
175 struct rte_eth_dev_callback {
176         TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
177         rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
178         void *cb_arg;                           /**< Parameter for callback */
179         void *ret_param;                        /**< Return parameter */
180         enum rte_eth_event_type event;          /**< Interrupt event type */
181         uint32_t active;                        /**< Callback is executing */
182 };
183
184 enum {
185         STAT_QMAP_TX = 0,
186         STAT_QMAP_RX
187 };
188
189 int
190 rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
191 {
192         int ret;
193         struct rte_devargs devargs = {.args = NULL};
194         const char *bus_param_key;
195         char *bus_str = NULL;
196         char *cls_str = NULL;
197         int str_size;
198
199         memset(iter, 0, sizeof(*iter));
200
201         /*
202          * The devargs string may use various syntaxes:
203          *   - 0000:08:00.0,representor=[1-3]
204          *   - pci:0000:06:00.0,representor=[0,5]
205          *   - class=eth,mac=00:11:22:33:44:55
206          * A new syntax is in development (not yet supported):
207          *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
208          */
209
210         /*
211          * Handle pure class filter (i.e. without any bus-level argument),
212          * from future new syntax.
213          * rte_devargs_parse() is not yet supporting the new syntax,
214          * that's why this simple case is temporarily parsed here.
215          */
216 #define iter_anybus_str "class=eth,"
217         if (strncmp(devargs_str, iter_anybus_str,
218                         strlen(iter_anybus_str)) == 0) {
219                 iter->cls_str = devargs_str + strlen(iter_anybus_str);
220                 goto end;
221         }
222
223         /* Split bus, device and parameters. */
224         ret = rte_devargs_parse(&devargs, devargs_str);
225         if (ret != 0)
226                 goto error;
227
228         /*
229          * Assume parameters of the old syntax can match only at the ethdev level.
230          * Extra parameters are ignored, thanks to the "+" prefix.
231          */
232         str_size = strlen(devargs.args) + 2;
233         cls_str = malloc(str_size);
234         if (cls_str == NULL) {
235                 ret = -ENOMEM;
236                 goto error;
237         }
238         ret = snprintf(cls_str, str_size, "+%s", devargs.args);
239         if (ret != str_size - 1) {
240                 ret = -EINVAL;
241                 goto error;
242         }
243         iter->cls_str = cls_str;
244         free(devargs.args); /* allocated by rte_devargs_parse() */
245         devargs.args = NULL;
246
247         iter->bus = devargs.bus;
248         if (iter->bus->dev_iterate == NULL) {
249                 ret = -ENOTSUP;
250                 goto error;
251         }
252
253         /* Convert bus args to new syntax for use with new API dev_iterate. */
254         if (strcmp(iter->bus->name, "vdev") == 0) {
255                 bus_param_key = "name";
256         } else if (strcmp(iter->bus->name, "pci") == 0) {
257                 bus_param_key = "addr";
258         } else {
259                 ret = -ENOTSUP;
260                 goto error;
261         }
262         str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
263         bus_str = malloc(str_size);
264         if (bus_str == NULL) {
265                 ret = -ENOMEM;
266                 goto error;
267         }
268         ret = snprintf(bus_str, str_size, "%s=%s",
269                         bus_param_key, devargs.name);
270         if (ret != str_size - 1) {
271                 ret = -EINVAL;
272                 goto error;
273         }
274         iter->bus_str = bus_str;
275
276 end:
277         iter->cls = rte_class_find_by_name("eth");
278         return 0;
279
280 error:
281         if (ret == -ENOTSUP)
282                 RTE_LOG(ERR, EAL, "Bus %s does not support iterating.\n",
283                                 iter->bus->name);
284         free(devargs.args);
285         free(bus_str);
286         free(cls_str);
287         return ret;
288 }
289
290 uint16_t
291 rte_eth_iterator_next(struct rte_dev_iterator *iter)
292 {
293         if (iter->cls == NULL) /* invalid ethdev iterator */
294                 return RTE_MAX_ETHPORTS;
295
296         do { /* loop to try all matching rte_device */
297                 /* If not pure ethdev filter and */
298                 if (iter->bus != NULL &&
299                                 /* not in middle of rte_eth_dev iteration, */
300                                 iter->class_device == NULL) {
301                         /* get next rte_device to try. */
302                         iter->device = iter->bus->dev_iterate(
303                                         iter->device, iter->bus_str, iter);
304                         if (iter->device == NULL)
305                                 break; /* no more rte_device candidate */
306                 }
307                 /* A device is matching bus part, need to check ethdev part. */
308                 iter->class_device = iter->cls->dev_iterate(
309                                 iter->class_device, iter->cls_str, iter);
310                 if (iter->class_device != NULL)
311                         return eth_dev_to_id(iter->class_device); /* match */
312         } while (iter->bus != NULL); /* need to try next rte_device */
313
314         /* No more ethdev port to iterate. */
315         rte_eth_iterator_cleanup(iter);
316         return RTE_MAX_ETHPORTS;
317 }
318
319 void
320 rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
321 {
322         if (iter->bus_str == NULL)
323                 return; /* nothing to free in pure class filter */
324         free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
325         free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
326         memset(iter, 0, sizeof(*iter));
327 }
328
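/*
 * Usage sketch (illustrative, not part of this file): walk every ethdev
 * port matching a devargs string with the iterator trio above. The
 * devargs value below is only an example; rte_eth_iterator_next()
 * returns RTE_MAX_ETHPORTS when exhausted and then cleans the iterator
 * up itself. The RTE_ETH_FOREACH_MATCHING_DEV() helper in rte_ethdev.h
 * wraps the same pattern.
 *
 *	struct rte_dev_iterator iter;
 *	uint16_t port_id;
 *
 *	if (rte_eth_iterator_init(&iter, "class=eth,mac=00:11:22:33:44:55") != 0)
 *		return;
 *	for (port_id = rte_eth_iterator_next(&iter);
 *	     port_id != RTE_MAX_ETHPORTS;
 *	     port_id = rte_eth_iterator_next(&iter))
 *		printf("matched port %u\n", port_id);
 */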
329 uint16_t
330 rte_eth_find_next(uint16_t port_id)
331 {
332         while (port_id < RTE_MAX_ETHPORTS &&
333                         rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
334                 port_id++;
335
336         if (port_id >= RTE_MAX_ETHPORTS)
337                 return RTE_MAX_ETHPORTS;
338
339         return port_id;
340 }
341
342 /*
343  * Macro to iterate over all valid ports for internal usage.
344  * Note: RTE_ETH_FOREACH_DEV is different because it filters out owned ports.
345  */
346 #define RTE_ETH_FOREACH_VALID_DEV(port_id) \
347         for (port_id = rte_eth_find_next(0); \
348              port_id < RTE_MAX_ETHPORTS; \
349              port_id = rte_eth_find_next(port_id + 1))
350
351 uint16_t
352 rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
353 {
354         port_id = rte_eth_find_next(port_id);
355         while (port_id < RTE_MAX_ETHPORTS &&
356                         rte_eth_devices[port_id].device != parent)
357                 port_id = rte_eth_find_next(port_id + 1);
358
359         return port_id;
360 }
361
362 uint16_t
363 rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
364 {
365         RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
366         return rte_eth_find_next_of(port_id,
367                         rte_eth_devices[ref_port_id].device);
368 }
369
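/*
 * Illustrative sketch: enumerate every port sharing the underlying
 * rte_device with a known port, e.g. representors spawned from the same
 * PCI function. REF_PORT and handle_sibling() are hypothetical names;
 * the RTE_ETH_FOREACH_DEV_SIBLING() macro in rte_ethdev.h wraps this
 * loop.
 *
 *	uint16_t sib;
 *
 *	for (sib = rte_eth_find_next_sibling(0, REF_PORT);
 *	     sib < RTE_MAX_ETHPORTS;
 *	     sib = rte_eth_find_next_sibling(sib + 1, REF_PORT))
 *		handle_sibling(sib);
 */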
370 static void
371 rte_eth_dev_shared_data_prepare(void)
372 {
373         const unsigned flags = 0;
374         const struct rte_memzone *mz;
375
376         rte_spinlock_lock(&rte_eth_shared_data_lock);
377
378         if (rte_eth_dev_shared_data == NULL) {
379                 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
380                         /* Allocate port data and ownership shared memory. */
381                         mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
382                                         sizeof(*rte_eth_dev_shared_data),
383                                         rte_socket_id(), flags);
384                 } else
385                         mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
386                 if (mz == NULL)
387                         rte_panic("Cannot allocate ethdev shared data\n");
388
389                 rte_eth_dev_shared_data = mz->addr;
390                 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
391                         rte_eth_dev_shared_data->next_owner_id =
392                                         RTE_ETH_DEV_NO_OWNER + 1;
393                         rte_spinlock_init(&rte_eth_dev_shared_data->ownership_lock);
394                         memset(rte_eth_dev_shared_data->data, 0,
395                                sizeof(rte_eth_dev_shared_data->data));
396                 }
397         }
398
399         rte_spinlock_unlock(&rte_eth_shared_data_lock);
400 }
401
402 static bool
403 is_allocated(const struct rte_eth_dev *ethdev)
404 {
405         return ethdev->data->name[0] != '\0';
406 }
407
408 static struct rte_eth_dev *
409 _rte_eth_dev_allocated(const char *name)
410 {
411         unsigned i;
412
413         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
414                 if (rte_eth_devices[i].data != NULL &&
415                     strcmp(rte_eth_devices[i].data->name, name) == 0)
416                         return &rte_eth_devices[i];
417         }
418         return NULL;
419 }
420
421 struct rte_eth_dev *
422 rte_eth_dev_allocated(const char *name)
423 {
424         struct rte_eth_dev *ethdev;
425
426         rte_eth_dev_shared_data_prepare();
427
428         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
429
430         ethdev = _rte_eth_dev_allocated(name);
431
432         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
433
434         return ethdev;
435 }
436
437 static uint16_t
438 rte_eth_dev_find_free_port(void)
439 {
440         unsigned i;
441
442         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
443                 /* Using shared name field to find a free port. */
444                 if (rte_eth_dev_shared_data->data[i].name[0] == '\0') {
445                         RTE_ASSERT(rte_eth_devices[i].state ==
446                                    RTE_ETH_DEV_UNUSED);
447                         return i;
448                 }
449         }
450         return RTE_MAX_ETHPORTS;
451 }
452
453 static struct rte_eth_dev *
454 eth_dev_get(uint16_t port_id)
455 {
456         struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
457
458         eth_dev->data = &rte_eth_dev_shared_data->data[port_id];
459
460         return eth_dev;
461 }
462
463 struct rte_eth_dev *
464 rte_eth_dev_allocate(const char *name)
465 {
466         uint16_t port_id;
467         struct rte_eth_dev *eth_dev = NULL;
468         size_t name_len;
469
470         name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
471         if (name_len == 0) {
472                 RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n");
473                 return NULL;
474         }
475
476         if (name_len >= RTE_ETH_NAME_MAX_LEN) {
477                 RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n");
478                 return NULL;
479         }
480
481         rte_eth_dev_shared_data_prepare();
482
483         /* Synchronize port creation between primary and secondary processes. */
484         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
485
486         if (_rte_eth_dev_allocated(name) != NULL) {
487                 RTE_ETHDEV_LOG(ERR,
488                         "Ethernet device with name %s already allocated\n",
489                         name);
490                 goto unlock;
491         }
492
493         port_id = rte_eth_dev_find_free_port();
494         if (port_id == RTE_MAX_ETHPORTS) {
495                 RTE_ETHDEV_LOG(ERR,
496                         "Reached maximum number of Ethernet ports\n");
497                 goto unlock;
498         }
499
500         eth_dev = eth_dev_get(port_id);
501         strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
502         eth_dev->data->port_id = port_id;
503         eth_dev->data->mtu = RTE_ETHER_MTU;
504
505 unlock:
506         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
507
508         return eth_dev;
509 }
510
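/*
 * Minimal PMD-side sketch of how rte_eth_dev_allocate() is typically
 * paired with private data allocation (struct my_priv and the vdev
 * handle are assumptions, not part of this file):
 *
 *	struct rte_eth_dev *eth_dev;
 *
 *	eth_dev = rte_eth_dev_allocate(rte_vdev_device_name(vdev));
 *	if (eth_dev == NULL)
 *		return -ENOMEM;
 *	eth_dev->data->dev_private = rte_zmalloc_socket("my_priv",
 *			sizeof(struct my_priv), 0, rte_socket_id());
 *	if (eth_dev->data->dev_private == NULL) {
 *		rte_eth_dev_release_port(eth_dev);
 *		return -ENOMEM;
 *	}
 */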
511 /*
512  * Attach to a port already registered by the primary process, which
513  * ensures that the same device gets the same port id in both the
514  * primary and secondary processes.
515  */
516 struct rte_eth_dev *
517 rte_eth_dev_attach_secondary(const char *name)
518 {
519         uint16_t i;
520         struct rte_eth_dev *eth_dev = NULL;
521
522         rte_eth_dev_shared_data_prepare();
523
524         /* Synchronize port attachment to primary port creation and release. */
525         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
526
527         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
528                 if (strcmp(rte_eth_dev_shared_data->data[i].name, name) == 0)
529                         break;
530         }
531         if (i == RTE_MAX_ETHPORTS) {
532                 RTE_ETHDEV_LOG(ERR,
533                         "Device %s is not driven by the primary process\n",
534                         name);
535         } else {
536                 eth_dev = eth_dev_get(i);
537                 RTE_ASSERT(eth_dev->data->port_id == i);
538         }
539
540         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
541         return eth_dev;
542 }
543
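/*
 * Sketch of the usual probe-time branch (a hypothetical PMD): allocate
 * the port on the primary process and attach on secondaries, so both
 * resolve the same port id for the same device.
 *
 *	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
 *		eth_dev = rte_eth_dev_allocate(name);
 *	else
 *		eth_dev = rte_eth_dev_attach_secondary(name);
 *	if (eth_dev == NULL)
 *		return -ENODEV;
 */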
544 int
545 rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
546 {
547         if (eth_dev == NULL)
548                 return -EINVAL;
549
550         rte_eth_dev_shared_data_prepare();
551
552         if (eth_dev->state != RTE_ETH_DEV_UNUSED)
553                 _rte_eth_dev_callback_process(eth_dev,
554                                 RTE_ETH_EVENT_DESTROY, NULL);
555
556         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
557
558         eth_dev->state = RTE_ETH_DEV_UNUSED;
559
560         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
561                 rte_free(eth_dev->data->rx_queues);
562                 rte_free(eth_dev->data->tx_queues);
563                 rte_free(eth_dev->data->mac_addrs);
564                 rte_free(eth_dev->data->hash_mac_addrs);
565                 rte_free(eth_dev->data->dev_private);
566                 memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
567         }
568
569         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
570
571         return 0;
572 }
573
574 int
575 rte_eth_dev_is_valid_port(uint16_t port_id)
576 {
577         if (port_id >= RTE_MAX_ETHPORTS ||
578             (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
579                 return 0;
580         else
581                 return 1;
582 }
583
584 static int
585 rte_eth_is_valid_owner_id(uint64_t owner_id)
586 {
587         if (owner_id == RTE_ETH_DEV_NO_OWNER ||
588             rte_eth_dev_shared_data->next_owner_id <= owner_id)
589                 return 0;
590         return 1;
591 }
592
593 uint64_t
594 rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
595 {
596         port_id = rte_eth_find_next(port_id);
597         while (port_id < RTE_MAX_ETHPORTS &&
598                         rte_eth_devices[port_id].data->owner.id != owner_id)
599                 port_id = rte_eth_find_next(port_id + 1);
600
601         return port_id;
602 }
603
604 int
605 rte_eth_dev_owner_new(uint64_t *owner_id)
606 {
607         rte_eth_dev_shared_data_prepare();
608
609         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
610
611         *owner_id = rte_eth_dev_shared_data->next_owner_id++;
612
613         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
614         return 0;
615 }
616
617 static int
618 _rte_eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
619                        const struct rte_eth_dev_owner *new_owner)
620 {
621         struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
622         struct rte_eth_dev_owner *port_owner;
623
624         if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
625                 RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
626                         port_id);
627                 return -ENODEV;
628         }
629
630         if (!rte_eth_is_valid_owner_id(new_owner->id) &&
631             !rte_eth_is_valid_owner_id(old_owner_id)) {
632                 RTE_ETHDEV_LOG(ERR,
633                         "Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
634                        old_owner_id, new_owner->id);
635                 return -EINVAL;
636         }
637
638         port_owner = &rte_eth_devices[port_id].data->owner;
639         if (port_owner->id != old_owner_id) {
640                 RTE_ETHDEV_LOG(ERR,
641                         "Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
642                         port_id, port_owner->name, port_owner->id);
643                 return -EPERM;
644         }
645
646         /* cannot truncate (same structure) */
647         strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);
648
649         port_owner->id = new_owner->id;
650
651         RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
652                 port_id, new_owner->name, new_owner->id);
653
654         return 0;
655 }
656
657 int
658 rte_eth_dev_owner_set(const uint16_t port_id,
659                       const struct rte_eth_dev_owner *owner)
660 {
661         int ret;
662
663         rte_eth_dev_shared_data_prepare();
664
665         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
666
667         ret = _rte_eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);
668
669         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
670         return ret;
671 }
672
673 int
674 rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
675 {
676         const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
677                         {.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
678         int ret;
679
680         rte_eth_dev_shared_data_prepare();
681
682         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
683
684         ret = _rte_eth_dev_owner_set(port_id, owner_id, &new_owner);
685
686         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
687         return ret;
688 }
689
690 int
691 rte_eth_dev_owner_delete(const uint64_t owner_id)
692 {
693         uint16_t port_id;
694         int ret = 0;
695
696         rte_eth_dev_shared_data_prepare();
697
698         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
699
700         if (rte_eth_is_valid_owner_id(owner_id)) {
701                 for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
702                         if (rte_eth_devices[port_id].data->owner.id == owner_id)
703                                 memset(&rte_eth_devices[port_id].data->owner, 0,
704                                        sizeof(struct rte_eth_dev_owner));
705                 RTE_ETHDEV_LOG(NOTICE,
706                         "All port owners with the %016"PRIx64" identifier have been removed\n",
707                         owner_id);
708         } else {
709                 RTE_ETHDEV_LOG(ERR,
710                                "Invalid owner id=%016"PRIx64"\n",
711                                owner_id);
712                 ret = -EINVAL;
713         }
714
715         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
716
717         return ret;
718 }
719
720 int
721 rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
722 {
723         int ret = 0;
724         struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
725
726         rte_eth_dev_shared_data_prepare();
727
728         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
729
730         if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
731                 RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
732                         port_id);
733                 ret = -ENODEV;
734         } else {
735                 rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
736         }
737
738         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
739         return ret;
740 }
741
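/*
 * Illustrative ownership workflow (PORT and use_port_exclusively() are
 * hypothetical): claiming a port makes generic RTE_ETH_FOREACH_DEV
 * loops elsewhere in the application skip it until the owner is unset.
 *
 *	struct rte_eth_dev_owner owner = { .name = "my_app" };
 *
 *	if (rte_eth_dev_owner_new(&owner.id) == 0 &&
 *	    rte_eth_dev_owner_set(PORT, &owner) == 0) {
 *		use_port_exclusively(PORT);
 *		rte_eth_dev_owner_unset(PORT, owner.id);
 *	}
 */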
742 int
743 rte_eth_dev_socket_id(uint16_t port_id)
744 {
745         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
746         return rte_eth_devices[port_id].data->numa_node;
747 }
748
749 void *
750 rte_eth_dev_get_sec_ctx(uint16_t port_id)
751 {
752         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
753         return rte_eth_devices[port_id].security_ctx;
754 }
755
756 uint16_t
757 rte_eth_dev_count_avail(void)
758 {
759         uint16_t p;
760         uint16_t count;
761
762         count = 0;
763
764         RTE_ETH_FOREACH_DEV(p)
765                 count++;
766
767         return count;
768 }
769
770 uint16_t
771 rte_eth_dev_count_total(void)
772 {
773         uint16_t port, count = 0;
774
775         RTE_ETH_FOREACH_VALID_DEV(port)
776                 count++;
777
778         return count;
779 }
780
781 int
782 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
783 {
784         char *tmp;
785
786         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
787
788         if (name == NULL) {
789                 RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
790                 return -EINVAL;
791         }
792
793         /* Don't check 'rte_eth_devices[i].data' here,
794          * because it might be overwritten by a vdev PMD. */
795         tmp = rte_eth_dev_shared_data->data[port_id].name;
796         strcpy(name, tmp);
797         return 0;
798 }
799
800 int
801 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
802 {
803         uint32_t pid;
804
805         if (name == NULL) {
806                 RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
807                 return -EINVAL;
808         }
809
810         RTE_ETH_FOREACH_VALID_DEV(pid)
811                 if (!strcmp(name, rte_eth_dev_shared_data->data[pid].name)) {
812                         *port_id = pid;
813                         return 0;
814                 }
815
816         return -ENODEV;
817 }
818
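/*
 * Lookup sketch: resolve a port id from a device name and back again;
 * the PCI address is only an example value.
 *
 *	uint16_t pid;
 *	char name[RTE_ETH_NAME_MAX_LEN];
 *
 *	if (rte_eth_dev_get_port_by_name("0000:08:00.0", &pid) == 0 &&
 *	    rte_eth_dev_get_name_by_port(pid, name) == 0)
 *		printf("port %u is %s\n", pid, name);
 */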
819 static int
820 eth_err(uint16_t port_id, int ret)
821 {
822         if (ret == 0)
823                 return 0;
824         if (rte_eth_dev_is_removed(port_id))
825                 return -EIO;
826         return ret;
827 }
828
829 static int
830 rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
831 {
832         uint16_t old_nb_queues = dev->data->nb_rx_queues;
833         void **rxq;
834         unsigned i;
835
836         if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
837                 dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
838                                 sizeof(dev->data->rx_queues[0]) * nb_queues,
839                                 RTE_CACHE_LINE_SIZE);
840                 if (dev->data->rx_queues == NULL) {
841                         dev->data->nb_rx_queues = 0;
842                         return -(ENOMEM);
843                 }
844         } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
845                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
846
847                 rxq = dev->data->rx_queues;
848
849                 for (i = nb_queues; i < old_nb_queues; i++)
850                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
851                 rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
852                                 RTE_CACHE_LINE_SIZE);
853                 if (rxq == NULL)
854                         return -(ENOMEM);
855                 if (nb_queues > old_nb_queues) {
856                         uint16_t new_qs = nb_queues - old_nb_queues;
857
858                         memset(rxq + old_nb_queues, 0,
859                                 sizeof(rxq[0]) * new_qs);
860                 }
861
862                 dev->data->rx_queues = rxq;
863
864         } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
865                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
866
867                 rxq = dev->data->rx_queues;
868
869                 for (i = nb_queues; i < old_nb_queues; i++)
870                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
871
872                 rte_free(dev->data->rx_queues);
873                 dev->data->rx_queues = NULL;
874         }
875         dev->data->nb_rx_queues = nb_queues;
876         return 0;
877 }
878
879 int
880 rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
881 {
882         struct rte_eth_dev *dev;
883
884         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
885
886         dev = &rte_eth_devices[port_id];
887         if (!dev->data->dev_started) {
888                 RTE_ETHDEV_LOG(ERR,
889                         "Port %u must be started before starting any queue\n",
890                         port_id);
891                 return -EINVAL;
892         }
893
894         if (rx_queue_id >= dev->data->nb_rx_queues) {
895                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
896                 return -EINVAL;
897         }
898
899         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
900
901         if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
902                 RTE_ETHDEV_LOG(INFO,
903                         "Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
904                         rx_queue_id, port_id);
905                 return -EINVAL;
906         }
907
908         if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
909                 RTE_ETHDEV_LOG(INFO,
910                         "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
911                         rx_queue_id, port_id);
912                 return 0;
913         }
914
915         return eth_err(port_id, dev->dev_ops->rx_queue_start(dev,
916                                                              rx_queue_id));
917
918 }
919
920 int
921 rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
922 {
923         struct rte_eth_dev *dev;
924
925         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
926
927         dev = &rte_eth_devices[port_id];
928         if (rx_queue_id >= dev->data->nb_rx_queues) {
929                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
930                 return -EINVAL;
931         }
932
933         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
934
935         if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
936                 RTE_ETHDEV_LOG(INFO,
937                         "Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
938                         rx_queue_id, port_id);
939                 return -EINVAL;
940         }
941
942         if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
943                 RTE_ETHDEV_LOG(INFO,
944                         "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
945                         rx_queue_id, port_id);
946                 return 0;
947         }
948
949         return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
950
951 }
952
953 int
954 rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
955 {
956         struct rte_eth_dev *dev;
957
958         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
959
960         dev = &rte_eth_devices[port_id];
961         if (!dev->data->dev_started) {
962                 RTE_ETHDEV_LOG(ERR,
963                         "Port %u must be started before starting any queue\n",
964                         port_id);
965                 return -EINVAL;
966         }
967
968         if (tx_queue_id >= dev->data->nb_tx_queues) {
969                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
970                 return -EINVAL;
971         }
972
973         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
974
975         if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
976                 RTE_ETHDEV_LOG(INFO,
977                         "Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
978                         tx_queue_id, port_id);
979                 return -EINVAL;
980         }
981
982         if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
983                 RTE_ETHDEV_LOG(INFO,
984                         "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
985                         tx_queue_id, port_id);
986                 return 0;
987         }
988
989         return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
990 }
991
992 int
993 rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
994 {
995         struct rte_eth_dev *dev;
996
997         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
998
999         dev = &rte_eth_devices[port_id];
1000         if (tx_queue_id >= dev->data->nb_tx_queues) {
1001                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
1002                 return -EINVAL;
1003         }
1004
1005         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
1006
1007         if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
1008                 RTE_ETHDEV_LOG(INFO,
1009                         "Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
1010                         tx_queue_id, port_id);
1011                 return -EINVAL;
1012         }
1013
1014         if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
1015                 RTE_ETHDEV_LOG(INFO,
1016                         "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
1017                         tx_queue_id, port_id);
1018                 return 0;
1019         }
1020
1021         return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
1022
1023 }
1024
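/*
 * Caller-side sketch: Rx/Tx queues configured with deferred_start are
 * left stopped by rte_eth_dev_start() and must be started individually
 * once the port is up (port_id and QUEUE are assumed valid here):
 *
 *	if (rte_eth_dev_start(port_id) == 0 &&
 *	    rte_eth_dev_rx_queue_start(port_id, QUEUE) != 0)
 *		RTE_LOG(ERR, USER1, "cannot start deferred Rx queue\n");
 */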
1025 static int
1026 rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
1027 {
1028         uint16_t old_nb_queues = dev->data->nb_tx_queues;
1029         void **txq;
1030         unsigned i;
1031
1032         if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
1033                 dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
1034                                                    sizeof(dev->data->tx_queues[0]) * nb_queues,
1035                                                    RTE_CACHE_LINE_SIZE);
1036                 if (dev->data->tx_queues == NULL) {
1037                         dev->data->nb_tx_queues = 0;
1038                         return -(ENOMEM);
1039                 }
1040         } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
1041                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
1042
1043                 txq = dev->data->tx_queues;
1044
1045                 for (i = nb_queues; i < old_nb_queues; i++)
1046                         (*dev->dev_ops->tx_queue_release)(txq[i]);
1047                 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
1048                                   RTE_CACHE_LINE_SIZE);
1049                 if (txq == NULL)
1050                         return -ENOMEM;
1051                 if (nb_queues > old_nb_queues) {
1052                         uint16_t new_qs = nb_queues - old_nb_queues;
1053
1054                         memset(txq + old_nb_queues, 0,
1055                                sizeof(txq[0]) * new_qs);
1056                 }
1057
1058                 dev->data->tx_queues = txq;
1059
1060         } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
1061                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
1062
1063                 txq = dev->data->tx_queues;
1064
1065                 for (i = nb_queues; i < old_nb_queues; i++)
1066                         (*dev->dev_ops->tx_queue_release)(txq[i]);
1067
1068                 rte_free(dev->data->tx_queues);
1069                 dev->data->tx_queues = NULL;
1070         }
1071         dev->data->nb_tx_queues = nb_queues;
1072         return 0;
1073 }
1074
1075 uint32_t
1076 rte_eth_speed_bitflag(uint32_t speed, int duplex)
1077 {
1078         switch (speed) {
1079         case ETH_SPEED_NUM_10M:
1080                 return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
1081         case ETH_SPEED_NUM_100M:
1082                 return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
1083         case ETH_SPEED_NUM_1G:
1084                 return ETH_LINK_SPEED_1G;
1085         case ETH_SPEED_NUM_2_5G:
1086                 return ETH_LINK_SPEED_2_5G;
1087         case ETH_SPEED_NUM_5G:
1088                 return ETH_LINK_SPEED_5G;
1089         case ETH_SPEED_NUM_10G:
1090                 return ETH_LINK_SPEED_10G;
1091         case ETH_SPEED_NUM_20G:
1092                 return ETH_LINK_SPEED_20G;
1093         case ETH_SPEED_NUM_25G:
1094                 return ETH_LINK_SPEED_25G;
1095         case ETH_SPEED_NUM_40G:
1096                 return ETH_LINK_SPEED_40G;
1097         case ETH_SPEED_NUM_50G:
1098                 return ETH_LINK_SPEED_50G;
1099         case ETH_SPEED_NUM_56G:
1100                 return ETH_LINK_SPEED_56G;
1101         case ETH_SPEED_NUM_100G:
1102                 return ETH_LINK_SPEED_100G;
1103         default:
1104                 return 0;
1105         }
1106 }
1107
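/*
 * Example use (a sketch): build a fixed 25G full-duplex link_speeds
 * mask; ETH_LINK_SPEED_FIXED additionally disables autonegotiation.
 *
 *	struct rte_eth_conf conf = { 0 };
 *
 *	conf.link_speeds = ETH_LINK_SPEED_FIXED |
 *		rte_eth_speed_bitflag(ETH_SPEED_NUM_25G, ETH_LINK_FULL_DUPLEX);
 */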
1108 const char *
1109 rte_eth_dev_rx_offload_name(uint64_t offload)
1110 {
1111         const char *name = "UNKNOWN";
1112         unsigned int i;
1113
1114         for (i = 0; i < RTE_DIM(rte_rx_offload_names); ++i) {
1115                 if (offload == rte_rx_offload_names[i].offload) {
1116                         name = rte_rx_offload_names[i].name;
1117                         break;
1118                 }
1119         }
1120
1121         return name;
1122 }
1123
1124 const char *
1125 rte_eth_dev_tx_offload_name(uint64_t offload)
1126 {
1127         const char *name = "UNKNOWN";
1128         unsigned int i;
1129
1130         for (i = 0; i < RTE_DIM(rte_tx_offload_names); ++i) {
1131                 if (offload == rte_tx_offload_names[i].offload) {
1132                         name = rte_tx_offload_names[i].name;
1133                         break;
1134                 }
1135         }
1136
1137         return name;
1138 }
1139
1140 static inline int
1141 check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
1142                    uint32_t max_rx_pkt_len, uint32_t dev_info_size)
1143 {
1144         int ret = 0;
1145
1146         if (dev_info_size == 0) {
1147                 if (config_size != max_rx_pkt_len) {
1148                         RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
1149                                        " %u != max_rx_pkt_len %u is not allowed\n",
1150                                        port_id, config_size, max_rx_pkt_len);
1151                         ret = -EINVAL;
1152                 }
1153         } else if (config_size > dev_info_size) {
1154                 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
1155                                "> max allowed value %u\n", port_id, config_size,
1156                                dev_info_size);
1157                 ret = -EINVAL;
1158         } else if (config_size < RTE_ETHER_MIN_LEN) {
1159                 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
1160                                "< min allowed value %u\n", port_id, config_size,
1161                                (unsigned int)RTE_ETHER_MIN_LEN);
1162                 ret = -EINVAL;
1163         }
1164         return ret;
1165 }
1166
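/*
 * Configuration sketch for the field this check validates (values are
 * examples only): an application enabling LRO may cap the size of
 * aggregated packets via rxmode.max_lro_pkt_size; left at 0, it
 * defaults to max_rx_pkt_len in rte_eth_dev_configure() below, and it
 * must not exceed dev_info.max_lro_pkt_size when the driver reports one.
 *
 *	struct rte_eth_conf conf = { 0 };
 *
 *	conf.rxmode.offloads |= DEV_RX_OFFLOAD_TCP_LRO;
 *	conf.rxmode.max_rx_pkt_len = RTE_ETHER_MAX_LEN;
 *	conf.rxmode.max_lro_pkt_size = 9600;
 */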
1167 /*
1168  * Validate offloads that are requested through rte_eth_dev_configure against
1169  * the offloads successfully set by the Ethernet device.
1170  *
1171  * @param port_id
1172  *   The port identifier of the Ethernet device.
1173  * @param req_offloads
1174  *   The offloads that have been requested through `rte_eth_dev_configure`.
1175  * @param set_offloads
1176  * The offloads successfully set by the Ethernet device.
1177  * @param offload_type
1178  * The offload type, i.e. the "Rx"/"Tx" string.
1179  * @param offload_name
1180  *   The function that prints the offload name.
1181  * @return
1182  *   - (0) if validation successful.
1183  *   - (-EINVAL) if a requested offload has been silently disabled.
1184  *
1185  */
1186 static int
1187 validate_offloads(uint16_t port_id, uint64_t req_offloads,
1188                   uint64_t set_offloads, const char *offload_type,
1189                   const char *(*offload_name)(uint64_t))
1190 {
1191         uint64_t offloads_diff = req_offloads ^ set_offloads;
1192         uint64_t offload;
1193         int ret = 0;
1194
1195         while (offloads_diff != 0) {
1196                 /* Check if any offload is requested but not enabled. */
1197                 offload = 1ULL << __builtin_ctzll(offloads_diff);
1198                 if (offload & req_offloads) {
1199                         RTE_ETHDEV_LOG(ERR,
1200                                 "Port %u failed to enable %s offload %s\n",
1201                                 port_id, offload_type, offload_name(offload));
1202                         ret = -EINVAL;
1203                 }
1204
1205                 /* Check if the offload could not be disabled. */
1206                 if (offload & set_offloads) {
1207                         RTE_ETHDEV_LOG(INFO,
1208                                 "Port %u failed to disable %s offload %s\n",
1209                                 port_id, offload_type, offload_name(offload));
1210                 }
1211
1212                 offloads_diff &= ~offload;
1213         }
1214
1215         return ret;
1216 }
1217
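/*
 * Worked example of the XOR walk above (values invented): with
 * req_offloads = 0x6 and set_offloads = 0x3, offloads_diff = 0x5.
 * Bit 0 is set but was not requested (logged as a failed disable),
 * while bit 2 was requested but not set, so the function returns
 * -EINVAL.
 */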
1218 int
1219 rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
1220                       const struct rte_eth_conf *dev_conf)
1221 {
1222         struct rte_eth_dev *dev;
1223         struct rte_eth_dev_info dev_info;
1224         struct rte_eth_conf orig_conf;
1225         int diag;
1226         int ret;
1227
1228         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1229
1230         dev = &rte_eth_devices[port_id];
1231
1232         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
1233
1234         if (dev->data->dev_started) {
1235                 RTE_ETHDEV_LOG(ERR,
1236                         "Port %u must be stopped to allow configuration\n",
1237                         port_id);
1238                 return -EBUSY;
1239         }
1240
1241         /* Store the original config, as rollback is required on failure */
1242         memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));
1243
1244         /*
1245          * Copy the dev_conf parameter into the dev structure.
1246          * rte_eth_dev_info_get() requires dev_conf; copy it before the dev_info call.
1247          */
1248         memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
1249
1250         ret = rte_eth_dev_info_get(port_id, &dev_info);
1251         if (ret != 0)
1252                 goto rollback;
1253
1254         /* If the number of queues specified by the application for both Rx
1255          * and Tx is zero, use driver preferred values. This cannot be done
1256          * individually as it is valid for either Tx or Rx (but not both) to
1257          * be zero. If the driver does not provide preferred values, fall back on
1258          * EAL defaults.
1259          */
1260         if (nb_rx_q == 0 && nb_tx_q == 0) {
1261                 nb_rx_q = dev_info.default_rxportconf.nb_queues;
1262                 if (nb_rx_q == 0)
1263                         nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
1264                 nb_tx_q = dev_info.default_txportconf.nb_queues;
1265                 if (nb_tx_q == 0)
1266                         nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
1267         }
1268
1269         if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
1270                 RTE_ETHDEV_LOG(ERR,
1271                         "Number of RX queues requested (%u) is greater than max supported (%d)\n",
1272                         nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
1273                 ret = -EINVAL;
1274                 goto rollback;
1275         }
1276
1277         if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
1278                 RTE_ETHDEV_LOG(ERR,
1279                         "Number of TX queues requested (%u) is greater than max supported (%d)\n",
1280                         nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
1281                 ret = -EINVAL;
1282                 goto rollback;
1283         }
1284
1285         /*
1286          * Check that the numbers of RX and TX queues are not greater
1287          * than the maximum number of RX and TX queues supported by the
1288          * configured device.
1289          */
1290         if (nb_rx_q > dev_info.max_rx_queues) {
1291                 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
1292                         port_id, nb_rx_q, dev_info.max_rx_queues);
1293                 ret = -EINVAL;
1294                 goto rollback;
1295         }
1296
1297         if (nb_tx_q > dev_info.max_tx_queues) {
1298                 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
1299                         port_id, nb_tx_q, dev_info.max_tx_queues);
1300                 ret = -EINVAL;
1301                 goto rollback;
1302         }
1303
1304         /* Check that the device supports requested interrupts */
1305         if ((dev_conf->intr_conf.lsc == 1) &&
1306                         (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
1307                 RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
1308                         dev->device->driver->name);
1309                 ret = -EINVAL;
1310                 goto rollback;
1311         }
1312         if ((dev_conf->intr_conf.rmv == 1) &&
1313                         (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
1314                 RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
1315                         dev->device->driver->name);
1316                 ret = -EINVAL;
1317                 goto rollback;
1318         }
1319
1320         /*
1321          * If jumbo frames are enabled, check that the maximum RX packet
1322          * length is supported by the configured device.
1323          */
1324         if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
1325                 if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) {
1326                         RTE_ETHDEV_LOG(ERR,
1327                                 "Ethdev port_id=%u max_rx_pkt_len %u > max valid value %u\n",
1328                                 port_id, dev_conf->rxmode.max_rx_pkt_len,
1329                                 dev_info.max_rx_pktlen);
1330                         ret = -EINVAL;
1331                         goto rollback;
1332                 } else if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN) {
1333                         RTE_ETHDEV_LOG(ERR,
1334                                 "Ethdev port_id=%u max_rx_pkt_len %u < min valid value %u\n",
1335                                 port_id, dev_conf->rxmode.max_rx_pkt_len,
1336                                 (unsigned int)RTE_ETHER_MIN_LEN);
1337                         ret = -EINVAL;
1338                         goto rollback;
1339                 }
1340         } else {
1341                 if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN ||
1342                         dev_conf->rxmode.max_rx_pkt_len > RTE_ETHER_MAX_LEN)
1343                         /* Use default value */
1344                         dev->data->dev_conf.rxmode.max_rx_pkt_len =
1345                                                         RTE_ETHER_MAX_LEN;
1346         }
1347
1348         /*
1349          * If LRO is enabled, check that the maximum aggregated packet
1350          * size is supported by the configured device.
1351          */
1352         if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
1353                 if (dev_conf->rxmode.max_lro_pkt_size == 0)
1354                         dev->data->dev_conf.rxmode.max_lro_pkt_size =
1355                                 dev->data->dev_conf.rxmode.max_rx_pkt_len;
1356                 ret = check_lro_pkt_size(port_id,
1357                                 dev->data->dev_conf.rxmode.max_lro_pkt_size,
1358                                 dev->data->dev_conf.rxmode.max_rx_pkt_len,
1359                                 dev_info.max_lro_pkt_size);
1360                 if (ret != 0)
1361                         goto rollback;
1362         }
1363
1364         /* Any requested offloading must be within its device capabilities */
1365         if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
1366              dev_conf->rxmode.offloads) {
1367                 RTE_ETHDEV_LOG(ERR,
1368                         "Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
1369                         "capabilities 0x%"PRIx64" in %s()\n",
1370                         port_id, dev_conf->rxmode.offloads,
1371                         dev_info.rx_offload_capa,
1372                         __func__);
1373                 ret = -EINVAL;
1374                 goto rollback;
1375         }
1376         if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
1377              dev_conf->txmode.offloads) {
1378                 RTE_ETHDEV_LOG(ERR,
1379                         "Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
1380                         "capabilities 0x%"PRIx64" in %s()\n",
1381                         port_id, dev_conf->txmode.offloads,
1382                         dev_info.tx_offload_capa,
1383                         __func__);
1384                 ret = -EINVAL;
1385                 goto rollback;
1386         }
1387
1388         dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
1389                 rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);
1390
1391         /* Check that device supports requested rss hash functions. */
1392         if ((dev_info.flow_type_rss_offloads |
1393              dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
1394             dev_info.flow_type_rss_offloads) {
1395                 RTE_ETHDEV_LOG(ERR,
1396                         "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
1397                         port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
1398                         dev_info.flow_type_rss_offloads);
1399                 ret = -EINVAL;
1400                 goto rollback;
1401         }
1402
1403         /* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
1404         if (((dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) == 0) &&
1405             (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
1406                 RTE_ETHDEV_LOG(ERR,
1407                         "Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
1408                         port_id,
1409                         rte_eth_dev_rx_offload_name(DEV_RX_OFFLOAD_RSS_HASH));
1410                 ret = -EINVAL;
1411                 goto rollback;
1412         }
1413
1414         /*
1415          * Setup new number of RX/TX queues and reconfigure device.
1416          */
1417         diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
1418         if (diag != 0) {
1419                 RTE_ETHDEV_LOG(ERR,
1420                         "Port%u rte_eth_dev_rx_queue_config = %d\n",
1421                         port_id, diag);
1422                 ret = diag;
1423                 goto rollback;
1424         }
1425
1426         diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
1427         if (diag != 0) {
1428                 RTE_ETHDEV_LOG(ERR,
1429                         "Port%u rte_eth_dev_tx_queue_config = %d\n",
1430                         port_id, diag);
1431                 rte_eth_dev_rx_queue_config(dev, 0);
1432                 ret = diag;
1433                 goto rollback;
1434         }
1435
1436         diag = (*dev->dev_ops->dev_configure)(dev);
1437         if (diag != 0) {
1438                 RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
1439                         port_id, diag);
1440                 ret = eth_err(port_id, diag);
1441                 goto reset_queues;
1442         }
1443
1444         /* Initialize Rx profiling if enabled at compilation time. */
1445         diag = __rte_eth_dev_profile_init(port_id, dev);
1446         if (diag != 0) {
1447                 RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
1448                         port_id, diag);
1449                 ret = eth_err(port_id, diag);
1450                 goto reset_queues;
1451         }
1452
1453         /* Validate Rx offloads. */
1454         diag = validate_offloads(port_id,
1455                         dev_conf->rxmode.offloads,
1456                         dev->data->dev_conf.rxmode.offloads, "Rx",
1457                         rte_eth_dev_rx_offload_name);
1458         if (diag != 0) {
1459                 ret = diag;
1460                 goto reset_queues;
1461         }
1462
1463         /* Validate Tx offloads. */
1464         diag = validate_offloads(port_id,
1465                         dev_conf->txmode.offloads,
1466                         dev->data->dev_conf.txmode.offloads, "Tx",
1467                         rte_eth_dev_tx_offload_name);
1468         if (diag != 0) {
1469                 ret = diag;
1470                 goto reset_queues;
1471         }
1472
1473         return 0;
1474 reset_queues:
1475         rte_eth_dev_rx_queue_config(dev, 0);
1476         rte_eth_dev_tx_queue_config(dev, 0);
1477 rollback:
1478         memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
1479
1480         return ret;
1481 }
1482
1483 void
1484 _rte_eth_dev_reset(struct rte_eth_dev *dev)
1485 {
1486         if (dev->data->dev_started) {
1487                 RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
1488                         dev->data->port_id);
1489                 return;
1490         }
1491
1492         rte_eth_dev_rx_queue_config(dev, 0);
1493         rte_eth_dev_tx_queue_config(dev, 0);
1494
1495         memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
1496 }
1497
1498 static void
1499 rte_eth_dev_mac_restore(struct rte_eth_dev *dev,
1500                         struct rte_eth_dev_info *dev_info)
1501 {
1502         struct rte_ether_addr *addr;
1503         uint16_t i;
1504         uint32_t pool = 0;
1505         uint64_t pool_mask;
1506
1507         /* replay MAC address configuration including default MAC */
1508         addr = &dev->data->mac_addrs[0];
1509         if (*dev->dev_ops->mac_addr_set != NULL)
1510                 (*dev->dev_ops->mac_addr_set)(dev, addr);
1511         else if (*dev->dev_ops->mac_addr_add != NULL)
1512                 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);
1513
1514         if (*dev->dev_ops->mac_addr_add != NULL) {
1515                 for (i = 1; i < dev_info->max_mac_addrs; i++) {
1516                         addr = &dev->data->mac_addrs[i];
1517
1518                         /* skip zero address */
1519                         if (rte_is_zero_ether_addr(addr))
1520                                 continue;
1521
1522                         pool = 0;
1523                         pool_mask = dev->data->mac_pool_sel[i];
1524
1525                         do {
1526                                 if (pool_mask & 1ULL)
1527                                         (*dev->dev_ops->mac_addr_add)(dev,
1528                                                 addr, i, pool);
1529                                 pool_mask >>= 1;
1530                                 pool++;
1531                         } while (pool_mask);
1532                 }
1533         }
1534 }
1535
1536 static int
1537 rte_eth_dev_config_restore(struct rte_eth_dev *dev,
1538                            struct rte_eth_dev_info *dev_info, uint16_t port_id)
1539 {
1540         int ret;
1541
1542         if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
1543                 rte_eth_dev_mac_restore(dev, dev_info);
1544
1545         /* replay promiscuous configuration */
1546         /*
1547          * Call the driver callbacks directly: port_id has already been
1548          * validated and the same-value check of the API must be bypassed.
1549          */
1550         if (rte_eth_promiscuous_get(port_id) == 1 &&
1551             *dev->dev_ops->promiscuous_enable != NULL) {
1552                 ret = eth_err(port_id,
1553                               (*dev->dev_ops->promiscuous_enable)(dev));
1554                 if (ret != 0 && ret != -ENOTSUP) {
1555                         RTE_ETHDEV_LOG(ERR,
1556                                 "Failed to enable promiscuous mode for device (port %u): %s\n",
1557                                 port_id, rte_strerror(-ret));
1558                         return ret;
1559                 }
1560         } else if (rte_eth_promiscuous_get(port_id) == 0 &&
1561                    *dev->dev_ops->promiscuous_disable != NULL) {
1562                 ret = eth_err(port_id,
1563                               (*dev->dev_ops->promiscuous_disable)(dev));
1564                 if (ret != 0 && ret != -ENOTSUP) {
1565                         RTE_ETHDEV_LOG(ERR,
1566                                 "Failed to disable promiscuous mode for device (port %u): %s\n",
1567                                 port_id, rte_strerror(-ret));
1568                         return ret;
1569                 }
1570         }
1571
1572         /* replay all multicast configuration */
1573         /*
1574          * Call the driver callbacks directly: port_id has already been
1575          * validated and the same-value check of the API must be bypassed.
1576          */
1577         if (rte_eth_allmulticast_get(port_id) == 1 &&
1578             *dev->dev_ops->allmulticast_enable != NULL) {
1579                 ret = eth_err(port_id,
1580                               (*dev->dev_ops->allmulticast_enable)(dev));
1581                 if (ret != 0 && ret != -ENOTSUP) {
1582                         RTE_ETHDEV_LOG(ERR,
1583                                 "Failed to enable allmulticast mode for device (port %u): %s\n",
1584                                 port_id, rte_strerror(-ret));
1585                         return ret;
1586                 }
1587         } else if (rte_eth_allmulticast_get(port_id) == 0 &&
1588                    *dev->dev_ops->allmulticast_disable != NULL) {
1589                 ret = eth_err(port_id,
1590                               (*dev->dev_ops->allmulticast_disable)(dev));
1591                 if (ret != 0 && ret != -ENOTSUP) {
1592                         RTE_ETHDEV_LOG(ERR,
1593                                 "Failed to disable allmulticast mode for device (port %u): %s\n",
1594                                 port_id, rte_strerror(-ret));
1595                         return ret;
1596                 }
1597         }
1598
1599         return 0;
1600 }
1601
1602 int
1603 rte_eth_dev_start(uint16_t port_id)
1604 {
1605         struct rte_eth_dev *dev;
1606         struct rte_eth_dev_info dev_info;
1607         int diag;
1608         int ret;
1609
1610         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1611
1612         dev = &rte_eth_devices[port_id];
1613
1614         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1615
1616         if (dev->data->dev_started != 0) {
1617                 RTE_ETHDEV_LOG(INFO,
1618                         "Device with port_id=%"PRIu16" already started\n",
1619                         port_id);
1620                 return 0;
1621         }
1622
1623         ret = rte_eth_dev_info_get(port_id, &dev_info);
1624         if (ret != 0)
1625                 return ret;
1626
1627         /* Restore the MAC address now if the device does not support live change */
1628         if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
1629                 rte_eth_dev_mac_restore(dev, &dev_info);
1630
1631         diag = (*dev->dev_ops->dev_start)(dev);
1632         if (diag == 0)
1633                 dev->data->dev_started = 1;
1634         else
1635                 return eth_err(port_id, diag);
1636
1637         ret = rte_eth_dev_config_restore(dev, &dev_info, port_id);
1638         if (ret != 0) {
1639                 RTE_ETHDEV_LOG(ERR,
1640                         "Error during restoring configuration for device (port %u): %s\n",
1641                         port_id, rte_strerror(-ret));
1642                 rte_eth_dev_stop(port_id);
1643                 return ret;
1644         }
1645
1646         if (dev->data->dev_conf.intr_conf.lsc == 0) {
1647                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
1648                 (*dev->dev_ops->link_update)(dev, 0);
1649         }
1650         return 0;
1651 }
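
/*
 * Usage sketch: the expected bring-up order around rte_eth_dev_start(),
 * assuming a valid port_id and a pre-created mempool "mb_pool" (both
 * hypothetical caller-provided names); error handling is elided.
 *
 *	struct rte_eth_conf port_conf;
 *
 *	memset(&port_conf, 0, sizeof(port_conf));
 *	rte_eth_dev_configure(port_id, 1, 1, &port_conf);
 *	rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *			NULL, mb_pool);
 *	rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL);
 *	rte_eth_dev_start(port_id);
 */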
1652
1653 void
1654 rte_eth_dev_stop(uint16_t port_id)
1655 {
1656         struct rte_eth_dev *dev;
1657
1658         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1659         dev = &rte_eth_devices[port_id];
1660
1661         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1662
1663         if (dev->data->dev_started == 0) {
1664                 RTE_ETHDEV_LOG(INFO,
1665                         "Device with port_id=%"PRIu16" already stopped\n",
1666                         port_id);
1667                 return;
1668         }
1669
1670         dev->data->dev_started = 0;
1671         (*dev->dev_ops->dev_stop)(dev);
1672 }
1673
1674 int
1675 rte_eth_dev_set_link_up(uint16_t port_id)
1676 {
1677         struct rte_eth_dev *dev;
1678
1679         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1680
1681         dev = &rte_eth_devices[port_id];
1682
1683         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1684         return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
1685 }
1686
1687 int
1688 rte_eth_dev_set_link_down(uint16_t port_id)
1689 {
1690         struct rte_eth_dev *dev;
1691
1692         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1693
1694         dev = &rte_eth_devices[port_id];
1695
1696         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1697         return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
1698 }
1699
1700 void
1701 rte_eth_dev_close(uint16_t port_id)
1702 {
1703         struct rte_eth_dev *dev;
1704
1705         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1706         dev = &rte_eth_devices[port_id];
1707
1708         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
1709         dev->data->dev_started = 0;
1710         (*dev->dev_ops->dev_close)(dev);
1711
1712         /* check behaviour flag - temporary for PMD migration */
1713         if ((dev->data->dev_flags & RTE_ETH_DEV_CLOSE_REMOVE) != 0) {
1714                 /* new behaviour: send event + reset state + free all data */
1715                 rte_eth_dev_release_port(dev);
1716                 return;
1717         }
1718         RTE_ETHDEV_LOG(DEBUG, "Port close is using the old behaviour.\n"
1719                         "The driver %s should migrate to the new behaviour.\n",
1720                         dev->device->driver->name);
1721         /* old behaviour: only free queue arrays */
1722         dev->data->nb_rx_queues = 0;
1723         rte_free(dev->data->rx_queues);
1724         dev->data->rx_queues = NULL;
1725         dev->data->nb_tx_queues = 0;
1726         rte_free(dev->data->tx_queues);
1727         dev->data->tx_queues = NULL;
1728 }
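
/*
 * Teardown sketch, assuming a valid port_id: a port must be stopped
 * before it is closed, and with RTE_ETH_DEV_CLOSE_REMOVE the port data
 * is freed by close, so the port must not be used afterwards.
 *
 *	rte_eth_dev_stop(port_id);
 *	rte_eth_dev_close(port_id);
 */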
1729
1730 int
1731 rte_eth_dev_reset(uint16_t port_id)
1732 {
1733         struct rte_eth_dev *dev;
1734         int ret;
1735
1736         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1737         dev = &rte_eth_devices[port_id];
1738
1739         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
1740
1741         rte_eth_dev_stop(port_id);
1742         ret = dev->dev_ops->dev_reset(dev);
1743
1744         return eth_err(port_id, ret);
1745 }
1746
1747 int
1748 rte_eth_dev_is_removed(uint16_t port_id)
1749 {
1750         struct rte_eth_dev *dev;
1751         int ret;
1752
1753         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
1754
1755         dev = &rte_eth_devices[port_id];
1756
1757         if (dev->state == RTE_ETH_DEV_REMOVED)
1758                 return 1;
1759
1760         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);
1761
1762         ret = dev->dev_ops->is_removed(dev);
1763         if (ret != 0)
1764                 /* Device is physically removed. */
1765                 dev->state = RTE_ETH_DEV_REMOVED;
1766
1767         return ret;
1768 }
1769
1770 int
1771 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1772                        uint16_t nb_rx_desc, unsigned int socket_id,
1773                        const struct rte_eth_rxconf *rx_conf,
1774                        struct rte_mempool *mp)
1775 {
1776         int ret;
1777         uint32_t mbp_buf_size;
1778         struct rte_eth_dev *dev;
1779         struct rte_eth_dev_info dev_info;
1780         struct rte_eth_rxconf local_conf;
1781         void **rxq;
1782
1783         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1784
1785         dev = &rte_eth_devices[port_id];
1786         if (rx_queue_id >= dev->data->nb_rx_queues) {
1787                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
1788                 return -EINVAL;
1789         }
1790
1791         if (mp == NULL) {
1792                 RTE_ETHDEV_LOG(ERR, "Invalid null mempool pointer\n");
1793                 return -EINVAL;
1794         }
1795
1796         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
1797
1798         /*
1799          * Check the size of the mbuf data buffer.
1800          * This value must be provided in the private data of the memory pool.
1801          * First check that the memory pool has a valid private data.
1802          */
1803         ret = rte_eth_dev_info_get(port_id, &dev_info);
1804         if (ret != 0)
1805                 return ret;
1806
1807         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
1808                 RTE_ETHDEV_LOG(ERR, "%s private_data_size %d < %d\n",
1809                         mp->name, (int)mp->private_data_size,
1810                         (int)sizeof(struct rte_pktmbuf_pool_private));
1811                 return -ENOSPC;
1812         }
1813         mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1814
1815         if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
1816                 RTE_ETHDEV_LOG(ERR,
1817                         "%s mbuf_data_room_size %d < %d (RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)=%d)\n",
1818                         mp->name, (int)mbp_buf_size,
1819                         (int)(RTE_PKTMBUF_HEADROOM + dev_info.min_rx_bufsize),
1820                         (int)RTE_PKTMBUF_HEADROOM,
1821                         (int)dev_info.min_rx_bufsize);
1822                 return -EINVAL;
1823         }
1824
1825         /* Use default specified by driver, if nb_rx_desc is zero */
1826         if (nb_rx_desc == 0) {
1827                 nb_rx_desc = dev_info.default_rxportconf.ring_size;
1828                 /* If driver default is also zero, fall back on EAL default */
1829                 if (nb_rx_desc == 0)
1830                         nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
1831         }
1832
1833         if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
1834                         nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
1835                         nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
1836
1837                 RTE_ETHDEV_LOG(ERR,
1838                         "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
1839                         nb_rx_desc, dev_info.rx_desc_lim.nb_max,
1840                         dev_info.rx_desc_lim.nb_min,
1841                         dev_info.rx_desc_lim.nb_align);
1842                 return -EINVAL;
1843         }
1844
1845         if (dev->data->dev_started &&
1846                 !(dev_info.dev_capa &
1847                         RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
1848                 return -EBUSY;
1849
1850         if (dev->data->dev_started &&
1851                 (dev->data->rx_queue_state[rx_queue_id] !=
1852                         RTE_ETH_QUEUE_STATE_STOPPED))
1853                 return -EBUSY;
1854
1855         rxq = dev->data->rx_queues;
1856         if (rxq[rx_queue_id]) {
1857                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
1858                                         -ENOTSUP);
1859                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
1860                 rxq[rx_queue_id] = NULL;
1861         }
1862
1863         if (rx_conf == NULL)
1864                 rx_conf = &dev_info.default_rxconf;
1865
1866         local_conf = *rx_conf;
1867
1868         /*
1869          * If an offload has already been enabled in
1870          * rte_eth_dev_configure(), it has been enabled on all queues,
1871          * so there is no need to enable it on this queue again.
1872          * The local_conf.offloads passed to the underlying PMD therefore
1873          * only carries the offloads that are enabled on this queue
1874          * but not on all queues.
1875          */
1876         local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
1877
1878         /*
1879          * Offloads newly added for this queue are those not enabled in
1880          * rte_eth_dev_configure(), and they must be of the per-queue type.
1881          * A pure per-port offload can't be enabled on one queue while
1882          * disabled on another. Nor can a pure per-port offload be newly
1883          * added on a queue if it hasn't been enabled in
1884          * rte_eth_dev_configure().
1885          */
1886         if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
1887              local_conf.offloads) {
1888                 RTE_ETHDEV_LOG(ERR,
1889                         "Ethdev port_id=%d rx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
1890                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
1891                         port_id, rx_queue_id, local_conf.offloads,
1892                         dev_info.rx_queue_offload_capa,
1893                         __func__);
1894                 return -EINVAL;
1895         }
1896
1897         /*
1898          * If LRO is enabled, check that the maximum aggregated packet
1899          * size is supported by the configured device.
1900          */
1901         if (local_conf.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
1902                 if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0)
1903                         dev->data->dev_conf.rxmode.max_lro_pkt_size =
1904                                 dev->data->dev_conf.rxmode.max_rx_pkt_len;
1905                 ret = check_lro_pkt_size(port_id,
1906                                 dev->data->dev_conf.rxmode.max_lro_pkt_size,
1907                                 dev->data->dev_conf.rxmode.max_rx_pkt_len,
1908                                 dev_info.max_lro_pkt_size);
1909                 if (ret != 0)
1910                         return ret;
1911         }
1912
1913         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
1914                                               socket_id, &local_conf, mp);
1915         if (!ret) {
1916                 if (!dev->data->min_rx_buf_size ||
1917                     dev->data->min_rx_buf_size > mbp_buf_size)
1918                         dev->data->min_rx_buf_size = mbp_buf_size;
1919         }
1920
1921         return eth_err(port_id, ret);
1922 }
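
/*
 * Usage sketch: creating a mempool whose data room covers the headroom
 * plus the device minimum Rx buffer size checked above, then letting the
 * driver defaults apply. The pool name and sizes are illustrative only;
 * passing nb_rx_desc == 0 selects the driver default ring size.
 *
 *	struct rte_mempool *mb_pool;
 *
 *	mb_pool = rte_pktmbuf_pool_create("rx_pool", 8192, 256, 0,
 *			RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *	rte_eth_rx_queue_setup(port_id, 0, 0, rte_socket_id(),
 *			NULL, mb_pool);
 */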
1923
1924 int
1925 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1926                                uint16_t nb_rx_desc,
1927                                const struct rte_eth_hairpin_conf *conf)
1928 {
1929         int ret;
1930         struct rte_eth_dev *dev;
1931         struct rte_eth_hairpin_cap cap;
1932         void **rxq;
1933         int i;
1934         int count;
1935
1936         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1937
1938         dev = &rte_eth_devices[port_id];
1939         if (rx_queue_id >= dev->data->nb_rx_queues) {
1940                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
1941                 return -EINVAL;
1942         }
1943         ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
1944         if (ret != 0)
1945                 return ret;
1946         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_hairpin_queue_setup,
1947                                 -ENOTSUP);
1948         /* If nb_rx_desc is zero, use the maximum descriptor count from the driver. */
1949         if (nb_rx_desc == 0)
1950                 nb_rx_desc = cap.max_nb_desc;
1951         if (nb_rx_desc > cap.max_nb_desc) {
1952                 RTE_ETHDEV_LOG(ERR,
1953                         "Invalid value for nb_rx_desc(=%hu), should be: <= %hu\n",
1954                         nb_rx_desc, cap.max_nb_desc);
1955                 return -EINVAL;
1956         }
1957         if (conf->peer_count > cap.max_rx_2_tx) {
1958                 RTE_ETHDEV_LOG(ERR,
1959                         "Invalid value for number of peers for Rx queue(=%hu), should be: <= %hu\n",
1960                         conf->peer_count, cap.max_rx_2_tx);
1961                 return -EINVAL;
1962         }
1963         if (conf->peer_count == 0) {
1964                 RTE_ETHDEV_LOG(ERR,
1965                         "Invalid value for number of peers for Rx queue(=%hu), should be: > 0\n",
1966                         conf->peer_count);
1967                 return -EINVAL;
1968         }
1969         for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
1970              cap.max_nb_queues != UINT16_MAX; i++) {
1971                 if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
1972                         count++;
1973         }
1974         if (count > cap.max_nb_queues) {
1975                 RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues, max is %d\n",
1976                                cap.max_nb_queues);
1977                 return -EINVAL;
1978         }
1979         if (dev->data->dev_started)
1980                 return -EBUSY;
1981         rxq = dev->data->rx_queues;
1982         if (rxq[rx_queue_id] != NULL) {
1983                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
1984                                         -ENOTSUP);
1985                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
1986                 rxq[rx_queue_id] = NULL;
1987         }
1988         ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
1989                                                       nb_rx_desc, conf);
1990         if (ret == 0)
1991                 dev->data->rx_queue_state[rx_queue_id] =
1992                         RTE_ETH_QUEUE_STATE_HAIRPIN;
1993         return eth_err(port_id, ret);
1994 }
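
/*
 * Usage sketch: binding Rx hairpin queue rx_queue_id to Tx queue
 * tx_queue_id of the same port. The queue ids are assumed to come from
 * the caller, and the matching Tx hairpin queue setup is assumed to be
 * done separately; nb_rx_desc == 0 selects the driver maximum.
 *
 *	struct rte_eth_hairpin_conf hairpin_conf = {
 *		.peer_count = 1,
 *	};
 *
 *	hairpin_conf.peers[0].port = port_id;
 *	hairpin_conf.peers[0].queue = tx_queue_id;
 *	rte_eth_rx_hairpin_queue_setup(port_id, rx_queue_id, 0,
 *			&hairpin_conf);
 */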
1995
1996 int
1997 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
1998                        uint16_t nb_tx_desc, unsigned int socket_id,
1999                        const struct rte_eth_txconf *tx_conf)
2000 {
2001         struct rte_eth_dev *dev;
2002         struct rte_eth_dev_info dev_info;
2003         struct rte_eth_txconf local_conf;
2004         void **txq;
2005         int ret;
2006
2007         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2008
2009         dev = &rte_eth_devices[port_id];
2010         if (tx_queue_id >= dev->data->nb_tx_queues) {
2011                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
2012                 return -EINVAL;
2013         }
2014
2015         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
2016
2017         ret = rte_eth_dev_info_get(port_id, &dev_info);
2018         if (ret != 0)
2019                 return ret;
2020
2021         /* Use default specified by driver, if nb_tx_desc is zero */
2022         if (nb_tx_desc == 0) {
2023                 nb_tx_desc = dev_info.default_txportconf.ring_size;
2024                 /* If driver default is zero, fall back on EAL default */
2025                 if (nb_tx_desc == 0)
2026                         nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
2027         }
2028         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
2029             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
2030             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
2031                 RTE_ETHDEV_LOG(ERR,
2032                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
2033                         nb_tx_desc, dev_info.tx_desc_lim.nb_max,
2034                         dev_info.tx_desc_lim.nb_min,
2035                         dev_info.tx_desc_lim.nb_align);
2036                 return -EINVAL;
2037         }
2038
2039         if (dev->data->dev_started &&
2040                 !(dev_info.dev_capa &
2041                         RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
2042                 return -EBUSY;
2043
2044         if (dev->data->dev_started &&
2045                 (dev->data->tx_queue_state[tx_queue_id] !=
2046                         RTE_ETH_QUEUE_STATE_STOPPED))
2047                 return -EBUSY;
2048
2049         txq = dev->data->tx_queues;
2050         if (txq[tx_queue_id]) {
2051                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
2052                                         -ENOTSUP);
2053                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
2054                 txq[tx_queue_id] = NULL;
2055         }
2056
2057         if (tx_conf == NULL)
2058                 tx_conf = &dev_info.default_txconf;
2059
2060         local_conf = *tx_conf;
2061
2062         /*
2063          * If an offload has already been enabled in
2064          * rte_eth_dev_configure(), it has been enabled on all queues,
2065          * so there is no need to enable it on this queue again.
2066          * The local_conf.offloads passed to the underlying PMD therefore
2067          * only carries the offloads that are enabled on this queue
2068          * but not on all queues.
2069          */
2070         local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
2071
2072         /*
2073          * Offloads newly added for this queue are those not enabled in
2074          * rte_eth_dev_configure(), and they must be of the per-queue type.
2075          * A pure per-port offload can't be enabled on one queue while
2076          * disabled on another. Nor can a pure per-port offload be newly
2077          * added on a queue if it hasn't been enabled in
2078          * rte_eth_dev_configure().
2079          */
2080         if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
2081              local_conf.offloads) {
2082                 RTE_ETHDEV_LOG(ERR,
2083                         "Ethdev port_id=%d tx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
2084                         "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2085                         port_id, tx_queue_id, local_conf.offloads,
2086                         dev_info.tx_queue_offload_capa,
2087                         __func__);
2088                 return -EINVAL;
2089         }
2090
2091         return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
2092                        tx_queue_id, nb_tx_desc, socket_id, &local_conf));
2093 }
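
/*
 * Usage sketch: requesting an additional per-queue Tx offload on top of
 * the port-level configuration, assuming the PMD reports it in
 * dev_info.tx_queue_offload_capa (otherwise the check above rejects it).
 * The queue id and ring size are illustrative.
 *
 *	struct rte_eth_dev_info dev_info;
 *	struct rte_eth_txconf txconf;
 *
 *	rte_eth_dev_info_get(port_id, &dev_info);
 *	txconf = dev_info.default_txconf;
 *	txconf.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
 *	rte_eth_tx_queue_setup(port_id, 0, 1024, rte_socket_id(), &txconf);
 */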
2094
2095 int
2096 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2097                                uint16_t nb_tx_desc,
2098                                const struct rte_eth_hairpin_conf *conf)
2099 {
2100         struct rte_eth_dev *dev;
2101         struct rte_eth_hairpin_cap cap;
2102         void **txq;
2103         int i;
2104         int count;
2105         int ret;
2106
2107         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2108         dev = &rte_eth_devices[port_id];
2109         if (tx_queue_id >= dev->data->nb_tx_queues) {
2110                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
2111                 return -EINVAL;
2112         }
2113         ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2114         if (ret != 0)
2115                 return ret;
2116         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup,
2117                                 -ENOTSUP);
2118         /* If nb_tx_desc is zero, use the maximum descriptor count from the driver. */
2119         if (nb_tx_desc == 0)
2120                 nb_tx_desc = cap.max_nb_desc;
2121         if (nb_tx_desc > cap.max_nb_desc) {
2122                 RTE_ETHDEV_LOG(ERR,
2123                         "Invalid value for nb_tx_desc(=%hu), should be: <= %hu\n",
2124                         nb_tx_desc, cap.max_nb_desc);
2125                 return -EINVAL;
2126         }
2127         if (conf->peer_count > cap.max_tx_2_rx) {
2128                 RTE_ETHDEV_LOG(ERR,
2129                         "Invalid value for number of peers for Tx queue(=%hu), should be: <= %hu\n",
2130                         conf->peer_count, cap.max_tx_2_rx);
2131                 return -EINVAL;
2132         }
2133         if (conf->peer_count == 0) {
2134                 RTE_ETHDEV_LOG(ERR,
2135                         "Invalid value for number of peers for Tx queue(=%hu), should be: > 0\n",
2136                         conf->peer_count);
2137                 return -EINVAL;
2138         }
2139         for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
2140              cap.max_nb_queues != UINT16_MAX; i++) {
2141                 if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
2142                         count++;
2143         }
2144         if (count > cap.max_nb_queues) {
2145                 RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues, max is %d\n",
2146                                cap.max_nb_queues);
2147                 return -EINVAL;
2148         }
2149         if (dev->data->dev_started)
2150                 return -EBUSY;
2151         txq = dev->data->tx_queues;
2152         if (txq[tx_queue_id] != NULL) {
2153                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
2154                                         -ENOTSUP);
2155                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
2156                 txq[tx_queue_id] = NULL;
2157         }
2158         ret = (*dev->dev_ops->tx_hairpin_queue_setup)
2159                 (dev, tx_queue_id, nb_tx_desc, conf);
2160         if (ret == 0)
2161                 dev->data->tx_queue_state[tx_queue_id] =
2162                         RTE_ETH_QUEUE_STATE_HAIRPIN;
2163         return eth_err(port_id, ret);
2164 }
2165
2166 void
2167 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
2168                 void *userdata __rte_unused)
2169 {
2170         unsigned i;
2171
2172         for (i = 0; i < unsent; i++)
2173                 rte_pktmbuf_free(pkts[i]);
2174 }
2175
2176 void
2177 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
2178                 void *userdata)
2179 {
2180         uint64_t *count = userdata;
2181         unsigned i;
2182
2183         for (i = 0; i < unsent; i++)
2184                 rte_pktmbuf_free(pkts[i]);
2185
2186         *count += unsent;
2187 }
2188
2189 int
2190 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
2191                 buffer_tx_error_fn cbfn, void *userdata)
2192 {
2193         buffer->error_callback = cbfn;
2194         buffer->error_userdata = userdata;
2195         return 0;
2196 }
2197
2198 int
2199 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
2200 {
2201         int ret = 0;
2202
2203         if (buffer == NULL)
2204                 return -EINVAL;
2205
2206         buffer->size = size;
2207         if (buffer->error_callback == NULL) {
2208                 ret = rte_eth_tx_buffer_set_err_callback(
2209                         buffer, rte_eth_tx_buffer_drop_callback, NULL);
2210         }
2211
2212         return ret;
2213 }
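
/*
 * Usage sketch: a buffered Tx path that counts dropped packets via
 * rte_eth_tx_buffer_count_callback(). The buffer is assumed to be used
 * from a single lcore; "m" is an assumed mbuf to transmit and queue 0
 * is illustrative.
 *
 *	static uint64_t drops;
 *	struct rte_eth_dev_tx_buffer *txb;
 *
 *	txb = rte_zmalloc_socket("tx_buffer",
 *			RTE_ETH_TX_BUFFER_SIZE(32), 0, rte_socket_id());
 *	rte_eth_tx_buffer_init(txb, 32);
 *	rte_eth_tx_buffer_set_err_callback(txb,
 *			rte_eth_tx_buffer_count_callback, &drops);
 *
 *	rte_eth_tx_buffer(port_id, 0, txb, m);
 *	rte_eth_tx_buffer_flush(port_id, 0, txb);
 */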
2214
2215 int
2216 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
2217 {
2218         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2219         int ret;
2220
2221         /* Validate input data. Bail if not valid or not supported. */
2222         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2223         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
             if (queue_id >= dev->data->nb_tx_queues) {
                     RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
                     return -EINVAL;
             }
2224
2225         /* Call driver to free pending mbufs. */
2226         ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
2227                                                free_cnt);
2228         return eth_err(port_id, ret);
2229 }
2230
2231 int
2232 rte_eth_promiscuous_enable(uint16_t port_id)
2233 {
2234         struct rte_eth_dev *dev;
2235         int diag = 0;
2236
2237         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2238         dev = &rte_eth_devices[port_id];
2239
2240         if (dev->data->promiscuous == 1)
2241                 return 0;
2242
2243         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP);
2244
2245         diag = (*dev->dev_ops->promiscuous_enable)(dev);
2246         dev->data->promiscuous = (diag == 0) ? 1 : 0;
2247
2248         return eth_err(port_id, diag);
2249 }
2250
2251 int
2252 rte_eth_promiscuous_disable(uint16_t port_id)
2253 {
2254         struct rte_eth_dev *dev;
2255         int diag = 0;
2256
2257         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2258         dev = &rte_eth_devices[port_id];
2259
2260         if (dev->data->promiscuous == 0)
2261                 return 0;
2262
2263         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP);
2264
2265         dev->data->promiscuous = 0;
2266         diag = (*dev->dev_ops->promiscuous_disable)(dev);
2267         if (diag != 0)
2268                 dev->data->promiscuous = 1;
2269
2270         return eth_err(port_id, diag);
2271 }
2272
2273 int
2274 rte_eth_promiscuous_get(uint16_t port_id)
2275 {
2276         struct rte_eth_dev *dev;
2277
2278         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2279
2280         dev = &rte_eth_devices[port_id];
2281         return dev->data->promiscuous;
2282 }
2283
2284 int
2285 rte_eth_allmulticast_enable(uint16_t port_id)
2286 {
2287         struct rte_eth_dev *dev;
2288         int diag;
2289
2290         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2291         dev = &rte_eth_devices[port_id];
2292
2293         if (dev->data->all_multicast == 1)
2294                 return 0;
2295
2296         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP);
2297         diag = (*dev->dev_ops->allmulticast_enable)(dev);
2298         dev->data->all_multicast = (diag == 0) ? 1 : 0;
2299
2300         return eth_err(port_id, diag);
2301 }
2302
2303 int
2304 rte_eth_allmulticast_disable(uint16_t port_id)
2305 {
2306         struct rte_eth_dev *dev;
2307         int diag;
2308
2309         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2310         dev = &rte_eth_devices[port_id];
2311
2312         if (dev->data->all_multicast == 0)
2313                 return 0;
2314
2315         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP);
2316         dev->data->all_multicast = 0;
2317         diag = (*dev->dev_ops->allmulticast_disable)(dev);
2318         if (diag != 0)
2319                 dev->data->all_multicast = 1;
2320
2321         return eth_err(port_id, diag);
2322 }
2323
2324 int
2325 rte_eth_allmulticast_get(uint16_t port_id)
2326 {
2327         struct rte_eth_dev *dev;
2328
2329         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2330
2331         dev = &rte_eth_devices[port_id];
2332         return dev->data->all_multicast;
2333 }
2334
2335 int
2336 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
2337 {
2338         struct rte_eth_dev *dev;
2339
2340         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2341         dev = &rte_eth_devices[port_id];
2342
2343         if (dev->data->dev_conf.intr_conf.lsc &&
2344             dev->data->dev_started)
2345                 rte_eth_linkstatus_get(dev, eth_link);
2346         else {
2347                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2348                 (*dev->dev_ops->link_update)(dev, 1);
2349                 *eth_link = dev->data->dev_link;
2350         }
2351
2352         return 0;
2353 }
2354
2355 int
2356 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
2357 {
2358         struct rte_eth_dev *dev;
2359
2360         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2361         dev = &rte_eth_devices[port_id];
2362
2363         if (dev->data->dev_conf.intr_conf.lsc &&
2364             dev->data->dev_started)
2365                 rte_eth_linkstatus_get(dev, eth_link);
2366         else {
2367                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2368                 (*dev->dev_ops->link_update)(dev, 0);
2369                 *eth_link = dev->data->dev_link;
2370         }
2371
2372         return 0;
2373 }
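
/*
 * Usage sketch: polling the link without waiting, assuming a valid
 * port_id.
 *
 *	struct rte_eth_link link;
 *
 *	rte_eth_link_get_nowait(port_id, &link);
 *	if (link.link_status == ETH_LINK_UP)
 *		printf("Port %u: %u Mbps %s-duplex\n", port_id,
 *			link.link_speed,
 *			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
 *			"full" : "half");
 */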
2374
2375 int
2376 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
2377 {
2378         struct rte_eth_dev *dev;
2379
2380         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2381
2382         dev = &rte_eth_devices[port_id];
2383         memset(stats, 0, sizeof(*stats));
2384
2385         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
2386         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
2387         return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
2388 }
2389
2390 int
2391 rte_eth_stats_reset(uint16_t port_id)
2392 {
2393         struct rte_eth_dev *dev;
2394         int ret;
2395
2396         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2397         dev = &rte_eth_devices[port_id];
2398
2399         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
2400         ret = (*dev->dev_ops->stats_reset)(dev);
2401         if (ret != 0)
2402                 return eth_err(port_id, ret);
2403
2404         dev->data->rx_mbuf_alloc_failed = 0;
2405
2406         return 0;
2407 }
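
/*
 * Usage sketch: reading and then clearing the basic counters, assuming
 * a valid port_id.
 *
 *	struct rte_eth_stats stats;
 *
 *	if (rte_eth_stats_get(port_id, &stats) == 0)
 *		printf("rx=%"PRIu64" tx=%"PRIu64" missed=%"PRIu64"\n",
 *			stats.ipackets, stats.opackets, stats.imissed);
 *	rte_eth_stats_reset(port_id);
 */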
2408
2409 static inline int
2410 get_xstats_basic_count(struct rte_eth_dev *dev)
2411 {
2412         uint16_t nb_rxqs, nb_txqs;
2413         int count;
2414
2415         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2416         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2417
2418         count = RTE_NB_STATS;
2419         count += nb_rxqs * RTE_NB_RXQ_STATS;
2420         count += nb_txqs * RTE_NB_TXQ_STATS;
2421
2422         return count;
2423 }
2424
2425 static int
2426 get_xstats_count(uint16_t port_id)
2427 {
2428         struct rte_eth_dev *dev;
2429         int count;
2430
2431         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2432         dev = &rte_eth_devices[port_id];
2433         if (dev->dev_ops->xstats_get_names_by_id != NULL) {
2434                 count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
2435                                 NULL, 0);
2436                 if (count < 0)
2437                         return eth_err(port_id, count);
2438         }
2439         if (dev->dev_ops->xstats_get_names != NULL) {
2440                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
2441                 if (count < 0)
2442                         return eth_err(port_id, count);
2443         } else
2444                 count = 0;
2445
2446
2447         count += get_xstats_basic_count(dev);
2448
2449         return count;
2450 }
2451
2452 int
2453 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
2454                 uint64_t *id)
2455 {
2456         int cnt_xstats, idx_xstat;
2457
2458         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2459
2460         if (!id) {
2461                 RTE_ETHDEV_LOG(ERR, "Id pointer is NULL\n");
2462                 return -ENOMEM;
2463         }
2464
2465         if (!xstat_name) {
2466                 RTE_ETHDEV_LOG(ERR, "xstat_name pointer is NULL\n");
2467                 return -ENOMEM;
2468         }
2469
2470         /* Get count */
2471         cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
2472         if (cnt_xstats < 0) {
2473                 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n");
2474                 return -ENODEV;
2475         }
2476
2477         /* Get id-name lookup table */
2478         struct rte_eth_xstat_name xstats_names[cnt_xstats];
2479
2480         if (cnt_xstats != rte_eth_xstats_get_names_by_id(
2481                         port_id, xstats_names, cnt_xstats, NULL)) {
2482                 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n");
2483                 return -1;
2484         }
2485
2486         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
2487                 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
2488                         *id = idx_xstat;
2489                         return 0;
2490                 }
2491         }
2492
2493         return -EINVAL;
2494 }
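
/*
 * Usage sketch: resolving one extended statistic by name and fetching
 * its current value by id; "rx_good_packets" is one of the basic xstats
 * exposed by this library, and a valid port_id is assumed.
 *
 *	uint64_t id, value;
 *
 *	if (rte_eth_xstats_get_id_by_name(port_id, "rx_good_packets",
 *			&id) == 0 &&
 *	    rte_eth_xstats_get_by_id(port_id, &id, &value, 1) == 1)
 *		printf("rx_good_packets: %"PRIu64"\n", value);
 */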
2495
2496 /* retrieve basic stats names */
2497 static int
2498 rte_eth_basic_stats_get_names(struct rte_eth_dev *dev,
2499         struct rte_eth_xstat_name *xstats_names)
2500 {
2501         int cnt_used_entries = 0;
2502         uint32_t idx, id_queue;
2503         uint16_t num_q;
2504
2505         for (idx = 0; idx < RTE_NB_STATS; idx++) {
2506                 strlcpy(xstats_names[cnt_used_entries].name,
2507                         rte_stats_strings[idx].name,
2508                         sizeof(xstats_names[0].name));
2509                 cnt_used_entries++;
2510         }
2511         num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2512         for (id_queue = 0; id_queue < num_q; id_queue++) {
2513                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
2514                         snprintf(xstats_names[cnt_used_entries].name,
2515                                 sizeof(xstats_names[0].name),
2516                                 "rx_q%u%s",
2517                                 id_queue, rte_rxq_stats_strings[idx].name);
2518                         cnt_used_entries++;
2519                 }
2520
2521         }
2522         num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2523         for (id_queue = 0; id_queue < num_q; id_queue++) {
2524                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
2525                         snprintf(xstats_names[cnt_used_entries].name,
2526                                 sizeof(xstats_names[0].name),
2527                                 "tx_q%u%s",
2528                                 id_queue, rte_txq_stats_strings[idx].name);
2529                         cnt_used_entries++;
2530                 }
2531         }
2532         return cnt_used_entries;
2533 }
2534
2535 /* retrieve ethdev extended statistics names */
2536 int
2537 rte_eth_xstats_get_names_by_id(uint16_t port_id,
2538         struct rte_eth_xstat_name *xstats_names, unsigned int size,
2539         uint64_t *ids)
2540 {
2541         struct rte_eth_xstat_name *xstats_names_copy;
2542         unsigned int no_basic_stat_requested = 1;
2543         unsigned int no_ext_stat_requested = 1;
2544         unsigned int expected_entries;
2545         unsigned int basic_count;
2546         struct rte_eth_dev *dev;
2547         unsigned int i;
2548         int ret;
2549
2550         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2551         dev = &rte_eth_devices[port_id];
2552
2553         basic_count = get_xstats_basic_count(dev);
2554         ret = get_xstats_count(port_id);
2555         if (ret < 0)
2556                 return ret;
2557         expected_entries = (unsigned int)ret;
2558
2559         /* Return max number of stats if no ids given */
2560         if (!ids) {
2561                 if (!xstats_names)
2562                         return expected_entries;
2563                 else if (size < expected_entries)
2564                         return expected_entries;
2565         }
2566
2567         if (ids && !xstats_names)
2568                 return -EINVAL;
2569
2570         if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
2571                 uint64_t ids_copy[size];
2572
2573                 for (i = 0; i < size; i++) {
2574                         if (ids[i] < basic_count) {
2575                                 no_basic_stat_requested = 0;
2576                                 break;
2577                         }
2578
2579                         /*
2580                          * Convert ids to xstats ids that PMD knows.
2581                          * ids known by user are basic + extended stats.
2582                          */
2583                         ids_copy[i] = ids[i] - basic_count;
2584                 }
2585
2586                 if (no_basic_stat_requested)
2587                         return (*dev->dev_ops->xstats_get_names_by_id)(dev,
2588                                         xstats_names, ids_copy, size);
2589         }
2590
2591         /* Retrieve all stats */
2592         if (!ids) {
2593                 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
2594                                 expected_entries);
2595                 if (num_stats < 0 || num_stats > (int)expected_entries)
2596                         return num_stats;
2597                 else
2598                         return expected_entries;
2599         }
2600
2601         xstats_names_copy = calloc(expected_entries,
2602                 sizeof(struct rte_eth_xstat_name));
2603
2604         if (!xstats_names_copy) {
2605                 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n");
2606                 return -ENOMEM;
2607         }
2608
2609         if (ids) {
2610                 for (i = 0; i < size; i++) {
2611                         if (ids[i] >= basic_count) {
2612                                 no_ext_stat_requested = 0;
2613                                 break;
2614                         }
2615                 }
2616         }
2617
2618         /* Fill xstats_names_copy structure */
2619         if (ids && no_ext_stat_requested) {
2620                 rte_eth_basic_stats_get_names(dev, xstats_names_copy);
2621         } else {
2622                 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
2623                         expected_entries);
2624                 if (ret < 0) {
2625                         free(xstats_names_copy);
2626                         return ret;
2627                 }
2628         }
2629
2630         /* Filter stats */
2631         for (i = 0; i < size; i++) {
2632                 if (ids[i] >= expected_entries) {
2633                         RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
2634                         free(xstats_names_copy);
2635                         return -1;
2636                 }
2637                 xstats_names[i] = xstats_names_copy[ids[i]];
2638         }
2639
2640         free(xstats_names_copy);
2641         return size;
2642 }
2643
2644 int
2645 rte_eth_xstats_get_names(uint16_t port_id,
2646         struct rte_eth_xstat_name *xstats_names,
2647         unsigned int size)
2648 {
2649         struct rte_eth_dev *dev;
2650         int cnt_used_entries;
2651         int cnt_expected_entries;
2652         int cnt_driver_entries;
2653
2654         cnt_expected_entries = get_xstats_count(port_id);
2655         if (xstats_names == NULL || cnt_expected_entries < 0 ||
2656                         (int)size < cnt_expected_entries)
2657                 return cnt_expected_entries;
2658
2659         /* port_id checked in get_xstats_count() */
2660         dev = &rte_eth_devices[port_id];
2661
2662         cnt_used_entries = rte_eth_basic_stats_get_names(
2663                 dev, xstats_names);
2664
2665         if (dev->dev_ops->xstats_get_names != NULL) {
2666                 /* If there are any driver-specific xstats, append them
2667                  * to end of list.
2668                  */
2669                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
2670                         dev,
2671                         xstats_names + cnt_used_entries,
2672                         size - cnt_used_entries);
2673                 if (cnt_driver_entries < 0)
2674                         return eth_err(port_id, cnt_driver_entries);
2675                 cnt_used_entries += cnt_driver_entries;
2676         }
2677
2678         return cnt_used_entries;
2679 }
2680
2681
2682 static int
2683 rte_eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
2684 {
2685         struct rte_eth_dev *dev;
2686         struct rte_eth_stats eth_stats;
2687         unsigned int count = 0, i, q;
2688         uint64_t val, *stats_ptr;
2689         uint16_t nb_rxqs, nb_txqs;
2690         int ret;
2691
2692         ret = rte_eth_stats_get(port_id, &eth_stats);
2693         if (ret < 0)
2694                 return ret;
2695
2696         dev = &rte_eth_devices[port_id];
2697
2698         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2699         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2700
2701         /* global stats */
2702         for (i = 0; i < RTE_NB_STATS; i++) {
2703                 stats_ptr = RTE_PTR_ADD(&eth_stats,
2704                                         rte_stats_strings[i].offset);
2705                 val = *stats_ptr;
2706                 xstats[count++].value = val;
2707         }
2708
2709         /* per-rxq stats */
2710         for (q = 0; q < nb_rxqs; q++) {
2711                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
2712                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2713                                         rte_rxq_stats_strings[i].offset +
2714                                         q * sizeof(uint64_t));
2715                         val = *stats_ptr;
2716                         xstats[count++].value = val;
2717                 }
2718         }
2719
2720         /* per-txq stats */
2721         for (q = 0; q < nb_txqs; q++) {
2722                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
2723                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2724                                         rte_txq_stats_strings[i].offset +
2725                                         q * sizeof(uint64_t));
2726                         val = *stats_ptr;
2727                         xstats[count++].value = val;
2728                 }
2729         }
2730         return count;
2731 }
2732
2733 /* retrieve ethdev extended statistics */
2734 int
2735 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
2736                          uint64_t *values, unsigned int size)
2737 {
2738         unsigned int no_basic_stat_requested = 1;
2739         unsigned int no_ext_stat_requested = 1;
2740         unsigned int num_xstats_filled;
2741         unsigned int basic_count;
2742         uint16_t expected_entries;
2743         struct rte_eth_dev *dev;
2744         unsigned int i;
2745         int ret;
2746
2747         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2748         ret = get_xstats_count(port_id);
2749         if (ret < 0)
2750                 return ret;
2751         expected_entries = (uint16_t)ret;
2752         struct rte_eth_xstat xstats[expected_entries];
2753         dev = &rte_eth_devices[port_id];
2754         basic_count = get_xstats_basic_count(dev);
2755
2756         /* Return max number of stats if no ids given */
2757         if (!ids) {
2758                 if (!values)
2759                         return expected_entries;
2760                 else if (size < expected_entries)
2761                         return expected_entries;
2762         }
2763
2764         if (ids && !values)
2765                 return -EINVAL;
2766
2767         if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
2769                 uint64_t ids_copy[size];
2770
2771                 for (i = 0; i < size; i++) {
2772                         if (ids[i] < basic_count) {
2773                                 no_basic_stat_requested = 0;
2774                                 break;
2775                         }
2776
2777                         /*
2778                          * Convert ids to xstats ids that PMD knows.
2779                          * ids known by user are basic + extended stats.
2780                          */
2781                         ids_copy[i] = ids[i] - basic_count;
2782                 }
2783
2784                 if (no_basic_stat_requested)
2785                         return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
2786                                         values, size);
2787         }
2788
2789         if (ids) {
2790                 for (i = 0; i < size; i++) {
2791                         if (ids[i] >= basic_count) {
2792                                 no_ext_stat_requested = 0;
2793                                 break;
2794                         }
2795                 }
2796         }
2797
2798         /* Fill the xstats structure */
2799         if (ids && no_ext_stat_requested)
2800                 ret = rte_eth_basic_stats_get(port_id, xstats);
2801         else
2802                 ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
2803
2804         if (ret < 0)
2805                 return ret;
2806         num_xstats_filled = (unsigned int)ret;
2807
2808         /* Return all stats */
2809         if (!ids) {
2810                 for (i = 0; i < num_xstats_filled; i++)
2811                         values[i] = xstats[i].value;
2812                 return expected_entries;
2813         }
2814
2815         /* Filter stats */
2816         for (i = 0; i < size; i++) {
2817                 if (ids[i] >= expected_entries) {
2818                         RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
2819                         return -1;
2820                 }
2821                 values[i] = xstats[ids[i]].value;
2822         }
2823         return size;
2824 }
2825
2826 int
2827 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
2828         unsigned int n)
2829 {
2830         struct rte_eth_dev *dev;
2831         unsigned int count = 0, i;
2832         signed int xcount = 0;
2833         uint16_t nb_rxqs, nb_txqs;
2834         int ret;
2835
2836         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2837
2838         dev = &rte_eth_devices[port_id];
2839
2840         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2841         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2842
2843         /* Return generic statistics */
2844         count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
2845                 (nb_txqs * RTE_NB_TXQ_STATS);
2846
2847         /* implemented by the driver */
2848         if (dev->dev_ops->xstats_get != NULL) {
2849                 /* Retrieve the xstats from the driver at the end of the
2850                  * xstats struct.
2851                  */
2852                 xcount = (*dev->dev_ops->xstats_get)(dev,
2853                                      xstats ? xstats + count : NULL,
2854                                      (n > count) ? n - count : 0);
2855
2856                 if (xcount < 0)
2857                         return eth_err(port_id, xcount);
2858         }
2859
2860         if (n < count + xcount || xstats == NULL)
2861                 return count + xcount;
2862
2863         /* now fill the xstats structure */
2864         ret = rte_eth_basic_stats_get(port_id, xstats);
2865         if (ret < 0)
2866                 return ret;
2867         count = ret;
2868
2869         for (i = 0; i < count; i++)
2870                 xstats[i].id = i;
2871         /* add an offset to driver-specific stats */
2872         for ( ; i < count + xcount; i++)
2873                 xstats[i].id += count;
2874
2875         return count + xcount;
2876 }
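
/*
 * Usage sketch: the two-pass xstats retrieval pattern, first querying
 * the required array size with NULL buffers, then fetching names and
 * values; allocation failures are ignored for brevity and a valid
 * port_id is assumed.
 *
 *	int i, n = rte_eth_xstats_get(port_id, NULL, 0);
 *	struct rte_eth_xstat *xs = malloc(n * sizeof(*xs));
 *	struct rte_eth_xstat_name *names = malloc(n * sizeof(*names));
 *
 *	rte_eth_xstats_get_names(port_id, names, n);
 *	n = rte_eth_xstats_get(port_id, xs, n);
 *	for (i = 0; i < n; i++)
 *		printf("%s: %"PRIu64"\n", names[xs[i].id].name,
 *			xs[i].value);
 */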
2877
2878 /* reset ethdev extended statistics */
2879 int
2880 rte_eth_xstats_reset(uint16_t port_id)
2881 {
2882         struct rte_eth_dev *dev;
2883
2884         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2885         dev = &rte_eth_devices[port_id];
2886
2887         /* implemented by the driver */
2888         if (dev->dev_ops->xstats_reset != NULL)
2889                 return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev));
2890
2891         /* fallback to default */
2892         return rte_eth_stats_reset(port_id);
2893 }
2894
2895 static int
2896 set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx,
2897                 uint8_t is_rx)
2898 {
2899         struct rte_eth_dev *dev;
2900
2901         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2902
2903         dev = &rte_eth_devices[port_id];
2904
2905         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
2906
2907         if (is_rx && (queue_id >= dev->data->nb_rx_queues))
2908                 return -EINVAL;
2909
2910         if (!is_rx && (queue_id >= dev->data->nb_tx_queues))
2911                 return -EINVAL;
2912
2913         if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
2914                 return -EINVAL;
2915
2916         return (*dev->dev_ops->queue_stats_mapping_set)
2917                         (dev, queue_id, stat_idx, is_rx);
2918 }
2919
2920
2921 int
2922 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
2923                 uint8_t stat_idx)
2924 {
2925         return eth_err(port_id, set_queue_stats_mapping(port_id, tx_queue_id,
2926                                                 stat_idx, STAT_QMAP_TX));
2927 }
2928
2929
2930 int
2931 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
2932                 uint8_t stat_idx)
2933 {
2934         return eth_err(port_id, set_queue_stats_mapping(port_id, rx_queue_id,
2935                                                 stat_idx, STAT_QMAP_RX));
2936 }
2937
2938 int
2939 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
2940 {
2941         struct rte_eth_dev *dev;
2942
2943         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2944         dev = &rte_eth_devices[port_id];
2945
2946         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
2947         return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
2948                                                         fw_version, fw_size));
2949 }
2950
2951 int
2952 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
2953 {
2954         struct rte_eth_dev *dev;
2955         const struct rte_eth_desc_lim lim = {
2956                 .nb_max = UINT16_MAX,
2957                 .nb_min = 0,
2958                 .nb_align = 1,
2959                 .nb_seg_max = UINT16_MAX,
2960                 .nb_mtu_seg_max = UINT16_MAX,
2961         };
2962         int diag;
2963
2964         /*
2965          * Init dev_info before the port_id check so that a caller which
2966          * ignores the return status never reads uninitialized contents.
2967          */
2968         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
2969
2970         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2971         dev = &rte_eth_devices[port_id];
2972
2973         dev_info->rx_desc_lim = lim;
2974         dev_info->tx_desc_lim = lim;
2975         dev_info->device = dev->device;
2976         dev_info->min_mtu = RTE_ETHER_MIN_MTU;
2977         dev_info->max_mtu = UINT16_MAX;
2978
2979         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
2980         diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info);
2981         if (diag != 0) {
2982                 /* Cleanup already filled in device information */
2983                 memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
2984                 return eth_err(port_id, diag);
2985         }
2986
2987         dev_info->driver_name = dev->device->driver->name;
2988         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
2989         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
2990
2991         dev_info->dev_flags = &dev->data->dev_flags;
2992
2993         return 0;
2994 }
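
/*
 * Illustrative usage sketch (not part of the library; RTE_ETHDEV_DOC_EXAMPLES
 * is a hypothetical guard): since the function zeroes *dev_info on any
 * failure, a caller that ignores the return status still sees sane (empty)
 * contents.
 */
#ifdef RTE_ETHDEV_DOC_EXAMPLES
static void
example_print_dev_info(uint16_t port_id)
{
        struct rte_eth_dev_info dev_info;

        if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
                return;

        printf("port %u: driver %s, up to %u rx / %u tx queues\n",
               port_id, dev_info.driver_name,
               dev_info.max_rx_queues, dev_info.max_tx_queues);
}
#endif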
2995
2996 int
2997 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
2998                                  uint32_t *ptypes, int num)
2999 {
3000         int i, j;
3001         struct rte_eth_dev *dev;
3002         const uint32_t *all_ptypes;
3003
3004         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3005         dev = &rte_eth_devices[port_id];
3006         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
3007         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3008
3009         if (!all_ptypes)
3010                 return 0;
3011
3012         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
3013                 if (all_ptypes[i] & ptype_mask) {
3014                         if (j < num)
3015                                 ptypes[j] = all_ptypes[i];
3016                         j++;
3017                 }
3018
3019         return j;
3020 }
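
/*
 * Illustrative usage sketch (not part of the library; RTE_ETHDEV_DOC_EXAMPLES
 * is a hypothetical guard): the return value is the number of matching
 * ptypes, which may exceed the array size, so callers must clamp before
 * reading the array.
 */
#ifdef RTE_ETHDEV_DOC_EXAMPLES
static void
example_list_l4_ptypes(uint16_t port_id)
{
        uint32_t ptypes[16];
        int i, num;

        num = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_L4_MASK,
                                               ptypes, RTE_DIM(ptypes));
        for (i = 0; i < num && i < (int)RTE_DIM(ptypes); i++)
                printf("supported L4 ptype: 0x%08x\n", ptypes[i]);
}
#endif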
3021
3022 int
3023 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3024                                  uint32_t *set_ptypes, unsigned int num)
3025 {
3026         const uint32_t valid_ptype_masks[] = {
3027                 RTE_PTYPE_L2_MASK,
3028                 RTE_PTYPE_L3_MASK,
3029                 RTE_PTYPE_L4_MASK,
3030                 RTE_PTYPE_TUNNEL_MASK,
3031                 RTE_PTYPE_INNER_L2_MASK,
3032                 RTE_PTYPE_INNER_L3_MASK,
3033                 RTE_PTYPE_INNER_L4_MASK,
3034         };
3035         const uint32_t *all_ptypes;
3036         struct rte_eth_dev *dev;
3037         uint32_t unused_mask;
3038         unsigned int i, j;
3039         int ret;
3040
3041         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3042         dev = &rte_eth_devices[port_id];
3043
3044         if (num > 0 && set_ptypes == NULL)
3045                 return -EINVAL;
3046
3047         if (*dev->dev_ops->dev_supported_ptypes_get == NULL ||
3048                         *dev->dev_ops->dev_ptypes_set == NULL) {
3049                 ret = 0;
3050                 goto ptype_unknown;
3051         }
3052
3053         if (ptype_mask == 0) {
3054                 ret = (*dev->dev_ops->dev_ptypes_set)(dev,
3055                                 ptype_mask);
3056                 goto ptype_unknown;
3057         }
3058
3059         unused_mask = ptype_mask;
3060         for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) {
3061                 uint32_t mask = ptype_mask & valid_ptype_masks[i];
3062                 if (mask && mask != valid_ptype_masks[i]) {
3063                         ret = -EINVAL;
3064                         goto ptype_unknown;
3065                 }
3066                 unused_mask &= ~valid_ptype_masks[i];
3067         }
3068
3069         if (unused_mask) {
3070                 ret = -EINVAL;
3071                 goto ptype_unknown;
3072         }
3073
3074         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3075         if (all_ptypes == NULL) {
3076                 ret = 0;
3077                 goto ptype_unknown;
3078         }
3079
3080         /*
3081          * Accommodate as many set_ptypes as possible. If the supplied
3082          * set_ptypes array is too small, fill it partially.
3083          */
3084         for (i = 0, j = 0; set_ptypes != NULL &&
3085                                 (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) {
3086                 if (ptype_mask & all_ptypes[i]) {
3087                         if (j < num - 1) {
3088                                 set_ptypes[j] = all_ptypes[i];
3089                                 j++;
3090                                 continue;
3091                         }
3092                         break;
3093                 }
3094         }
3095
3096         if (set_ptypes != NULL && j < num)
3097                 set_ptypes[j] = RTE_PTYPE_UNKNOWN;
3098
3099         return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask);
3100
3101 ptype_unknown:
3102         if (num > 0)
3103                 set_ptypes[0] = RTE_PTYPE_UNKNOWN;
3104
3105         return ret;
3106 }
3107
3108 int
3109 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
3110 {
3111         struct rte_eth_dev *dev;
3112
3113         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3114         dev = &rte_eth_devices[port_id];
3115         rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
3116
3117         return 0;
3118 }
3119
3120 int
3121 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
3122 {
3123         struct rte_eth_dev *dev;
3124
3125         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3126
3127         dev = &rte_eth_devices[port_id];
3128         *mtu = dev->data->mtu;
3129         return 0;
3130 }
3131
3132 int
3133 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
3134 {
3135         int ret;
3136         struct rte_eth_dev_info dev_info;
3137         struct rte_eth_dev *dev;
3138
3139         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3140         dev = &rte_eth_devices[port_id];
3141         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
3142
3143         /*
3144          * Check if the device supports dev_infos_get; if it does not,
3145          * skip the min_mtu/max_mtu validation here, as it requires values
3146          * that are populated by rte_eth_dev_info_get(), which in turn
3147          * relies on dev->dev_ops->dev_infos_get.
3148          */
3149         if (*dev->dev_ops->dev_infos_get != NULL) {
3150                 ret = rte_eth_dev_info_get(port_id, &dev_info);
3151                 if (ret != 0)
3152                         return ret;
3153
3154                 if (mtu < dev_info.min_mtu || mtu > dev_info.max_mtu)
3155                         return -EINVAL;
3156         }
3157
3158         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
3159         if (!ret)
3160                 dev->data->mtu = mtu;
3161
3162         return eth_err(port_id, ret);
3163 }
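
/*
 * Illustrative usage sketch (not part of the library; RTE_ETHDEV_DOC_EXAMPLES
 * is a hypothetical guard): clamp a requested MTU into the
 * [min_mtu, max_mtu] range advertised by the driver before applying it,
 * mirroring the validation performed above.
 */
#ifdef RTE_ETHDEV_DOC_EXAMPLES
static int
example_set_clamped_mtu(uint16_t port_id, uint16_t mtu)
{
        struct rte_eth_dev_info dev_info;
        int ret;

        ret = rte_eth_dev_info_get(port_id, &dev_info);
        if (ret != 0)
                return ret;

        if (mtu < dev_info.min_mtu)
                mtu = dev_info.min_mtu;
        if (mtu > dev_info.max_mtu)
                mtu = dev_info.max_mtu;

        return rte_eth_dev_set_mtu(port_id, mtu);
}
#endif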
3164
3165 int
3166 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
3167 {
3168         struct rte_eth_dev *dev;
3169         int ret;
3170
3171         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3172         dev = &rte_eth_devices[port_id];
3173         if (!(dev->data->dev_conf.rxmode.offloads &
3174               DEV_RX_OFFLOAD_VLAN_FILTER)) {
3175                 RTE_ETHDEV_LOG(ERR, "Port %u: vlan-filtering disabled\n",
3176                         port_id);
3177                 return -ENOSYS;
3178         }
3179
3180         if (vlan_id > 4095) {
3181                 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n",
3182                         port_id, vlan_id);
3183                 return -EINVAL;
3184         }
3185         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
3186
3187         ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
3188         if (ret == 0) {
3189                 struct rte_vlan_filter_conf *vfc;
3190                 int vidx;
3191                 int vbit;
3192
3193                 vfc = &dev->data->vlan_filter_conf;
3194                 vidx = vlan_id / 64;
3195                 vbit = vlan_id % 64;
3196
3197                 if (on)
3198                         vfc->ids[vidx] |= UINT64_C(1) << vbit;
3199                 else
3200                         vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
3201         }
3202
3203         return eth_err(port_id, ret);
3204 }
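
/*
 * Illustrative usage sketch (not part of the library; RTE_ETHDEV_DOC_EXAMPLES
 * is a hypothetical guard): VLAN filtering must be enabled in
 * rxmode.offloads before individual VLAN IDs can be added, otherwise the
 * call above fails with -ENOSYS.  Here the offload is switched on through
 * rte_eth_dev_set_vlan_offload() first.
 */
#ifdef RTE_ETHDEV_DOC_EXAMPLES
static int
example_accept_vlan(uint16_t port_id, uint16_t vlan_id)
{
        int mask, ret;

        mask = rte_eth_dev_get_vlan_offload(port_id);
        if (mask < 0)
                return mask;

        ret = rte_eth_dev_set_vlan_offload(port_id,
                                           mask | ETH_VLAN_FILTER_OFFLOAD);
        if (ret != 0)
                return ret;

        return rte_eth_dev_vlan_filter(port_id, vlan_id, 1);
}
#endif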
3205
3206 int
3207 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3208                                     int on)
3209 {
3210         struct rte_eth_dev *dev;
3211
3212         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3213         dev = &rte_eth_devices[port_id];
3214         if (rx_queue_id >= dev->data->nb_rx_queues) {
3215                 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id);
3216                 return -EINVAL;
3217         }
3218
3219         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
3220         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
3221
3222         return 0;
3223 }
3224
3225 int
3226 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
3227                                 enum rte_vlan_type vlan_type,
3228                                 uint16_t tpid)
3229 {
3230         struct rte_eth_dev *dev;
3231
3232         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3233         dev = &rte_eth_devices[port_id];
3234         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
3235
3236         return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
3237                                                                tpid));
3238 }
3239
3240 int
3241 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
3242 {
3243         struct rte_eth_dev *dev;
3244         int ret = 0;
3245         int mask = 0;
3246         int cur, org = 0;
3247         uint64_t orig_offloads;
3248         uint64_t *dev_offloads;
3249
3250         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3251         dev = &rte_eth_devices[port_id];
3252
3253         /* save original values in case of failure */
3254         orig_offloads = dev->data->dev_conf.rxmode.offloads;
3255         dev_offloads = &dev->data->dev_conf.rxmode.offloads;
3256
3257         /* check which options were changed by the application */
3258         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
3259         org = !!(*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
3260         if (cur != org) {
3261                 if (cur)
3262                         *dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
3263                 else
3264                         *dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
3265                 mask |= ETH_VLAN_STRIP_MASK;
3266         }
3267
3268         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
3269         org = !!(*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
3270         if (cur != org) {
3271                 if (cur)
3272                         *dev_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3273                 else
3274                         *dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
3275                 mask |= ETH_VLAN_FILTER_MASK;
3276         }
3277
3278         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
3279         org = !!(*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND);
3280         if (cur != org) {
3281                 if (cur)
3282                         *dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
3283                 else
3284                         *dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
3285                 mask |= ETH_VLAN_EXTEND_MASK;
3286         }
3287
3288         cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD);
3289         org = !!(*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP);
3290         if (cur != org) {
3291                 if (cur)
3292                         *dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
3293                 else
3294                         *dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
3295                 mask |= ETH_QINQ_STRIP_MASK;
3296         }
3297
3298         /* no change */
3299         if (mask == 0)
3300                 return ret;
3301
3302         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
3303         ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
3304         if (ret) {
3305                 /* hit an error, restore the original values */
3306                 *dev_offloads = orig_offloads;
3307         }
3308
3309         return eth_err(port_id, ret);
3310 }
3311
3312 int
3313 rte_eth_dev_get_vlan_offload(uint16_t port_id)
3314 {
3315         struct rte_eth_dev *dev;
3316         uint64_t *dev_offloads;
3317         int ret = 0;
3318
3319         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3320         dev = &rte_eth_devices[port_id];
3321         dev_offloads = &dev->data->dev_conf.rxmode.offloads;
3322
3323         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
3324                 ret |= ETH_VLAN_STRIP_OFFLOAD;
3325
3326         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
3327                 ret |= ETH_VLAN_FILTER_OFFLOAD;
3328
3329         if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
3330                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
3331
3332         if (*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
3333                 ret |= ETH_QINQ_STRIP_OFFLOAD;
3334
3335         return ret;
3336 }
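
/*
 * Illustrative usage sketch (not part of the library; RTE_ETHDEV_DOC_EXAMPLES
 * is a hypothetical guard): the getter above pairs with
 * rte_eth_dev_set_vlan_offload() for read-modify-write updates of a single
 * offload bit.
 */
#ifdef RTE_ETHDEV_DOC_EXAMPLES
static int
example_enable_vlan_strip(uint16_t port_id)
{
        int mask;

        mask = rte_eth_dev_get_vlan_offload(port_id);
        if (mask < 0)
                return mask;

        /* turn stripping on, leave the other offload bits untouched */
        return rte_eth_dev_set_vlan_offload(port_id,
                                            mask | ETH_VLAN_STRIP_OFFLOAD);
}
#endif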
3337
3338 int
3339 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
3340 {
3341         struct rte_eth_dev *dev;
3342
3343         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3344         dev = &rte_eth_devices[port_id];
3345         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
3346
3347         return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
3348 }
3349
3350 int
3351 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3352 {
3353         struct rte_eth_dev *dev;
3354
3355         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3356         dev = &rte_eth_devices[port_id];
3357         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
3358         memset(fc_conf, 0, sizeof(*fc_conf));
3359         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
3360 }
3361
3362 int
3363 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3364 {
3365         struct rte_eth_dev *dev;
3366
3367         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3368         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
3369                 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n");
3370                 return -EINVAL;
3371         }
3372
3373         dev = &rte_eth_devices[port_id];
3374         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
3375         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
3376 }
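
/*
 * Illustrative usage sketch (not part of the library; RTE_ETHDEV_DOC_EXAMPLES
 * is a hypothetical guard): since the driver validates the whole
 * rte_eth_fc_conf, the usual pattern is to read the current configuration,
 * change one field and write it back.
 */
#ifdef RTE_ETHDEV_DOC_EXAMPLES
static int
example_enable_rx_pause(uint16_t port_id)
{
        struct rte_eth_fc_conf fc_conf;
        int ret;

        ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
        if (ret != 0)
                return ret;

        fc_conf.mode = RTE_FC_RX_PAUSE; /* honour received PAUSE frames */

        return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}
#endif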
3377
3378 int
3379 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
3380                                    struct rte_eth_pfc_conf *pfc_conf)
3381 {
3382         struct rte_eth_dev *dev;
3383
3384         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3385         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
3386                 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
3387                 return -EINVAL;
3388         }
3389
3390         dev = &rte_eth_devices[port_id];
3391         /* High water, low water validation is device-specific */
3392         if (*dev->dev_ops->priority_flow_ctrl_set)
3393                 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
3394                                         (dev, pfc_conf));
3395         return -ENOTSUP;
3396 }
3397
3398 static int
3399 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
3400                         uint16_t reta_size)
3401 {
3402         uint16_t i, num;
3403
3404         if (!reta_conf)
3405                 return -EINVAL;
3406
3407         num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
3408         for (i = 0; i < num; i++) {
3409                 if (reta_conf[i].mask)
3410                         return 0;
3411         }
3412
3413         return -EINVAL;
3414 }
3415
3416 static int
3417 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
3418                          uint16_t reta_size,
3419                          uint16_t max_rxq)
3420 {
3421         uint16_t i, idx, shift;
3422
3423         if (!reta_conf)
3424                 return -EINVAL;
3425
3426         if (max_rxq == 0) {
3427                 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n");
3428                 return -EINVAL;
3429         }
3430
3431         for (i = 0; i < reta_size; i++) {
3432                 idx = i / RTE_RETA_GROUP_SIZE;
3433                 shift = i % RTE_RETA_GROUP_SIZE;
3434                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
3435                         (reta_conf[idx].reta[shift] >= max_rxq)) {
3436                         RTE_ETHDEV_LOG(ERR,
3437                                 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n",
3438                                 idx, shift,
3439                                 reta_conf[idx].reta[shift], max_rxq);
3440                         return -EINVAL;
3441                 }
3442         }
3443
3444         return 0;
3445 }
3446
3447 int
3448 rte_eth_dev_rss_reta_update(uint16_t port_id,
3449                             struct rte_eth_rss_reta_entry64 *reta_conf,
3450                             uint16_t reta_size)
3451 {
3452         struct rte_eth_dev *dev;
3453         int ret;
3454
3455         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3456         /* Check mask bits */
3457         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
3458         if (ret < 0)
3459                 return ret;
3460
3461         dev = &rte_eth_devices[port_id];
3462
3463         /* Check entry value */
3464         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
3465                                 dev->data->nb_rx_queues);
3466         if (ret < 0)
3467                 return ret;
3468
3469         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
3470         return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
3471                                                              reta_size));
3472 }
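
/*
 * Illustrative usage sketch (not part of the library; RTE_ETHDEV_DOC_EXAMPLES
 * is a hypothetical guard): build a redirection table that spreads
 * reta_size entries round-robin over nb_queues receive queues.  Each
 * rte_eth_rss_reta_entry64 covers RTE_RETA_GROUP_SIZE (64) entries, gated
 * by its mask bits.
 */
#ifdef RTE_ETHDEV_DOC_EXAMPLES
static int
example_reta_round_robin(uint16_t port_id, uint16_t reta_size,
                         uint16_t nb_queues)
{
        struct rte_eth_rss_reta_entry64 reta_conf[8];
        uint16_t i, idx, shift;

        if (nb_queues == 0 ||
            reta_size > RTE_DIM(reta_conf) * RTE_RETA_GROUP_SIZE)
                return -EINVAL;

        memset(reta_conf, 0, sizeof(reta_conf));
        for (i = 0; i < reta_size; i++) {
                idx = i / RTE_RETA_GROUP_SIZE;
                shift = i % RTE_RETA_GROUP_SIZE;
                reta_conf[idx].mask |= UINT64_C(1) << shift;
                reta_conf[idx].reta[shift] = i % nb_queues;
        }

        return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
}
#endif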
3473
3474 int
3475 rte_eth_dev_rss_reta_query(uint16_t port_id,
3476                            struct rte_eth_rss_reta_entry64 *reta_conf,
3477                            uint16_t reta_size)
3478 {
3479         struct rte_eth_dev *dev;
3480         int ret;
3481
3482         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3483
3484         /* Check mask bits */
3485         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
3486         if (ret < 0)
3487                 return ret;
3488
3489         dev = &rte_eth_devices[port_id];
3490         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
3491         return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
3492                                                             reta_size));
3493 }
3494
3495 int
3496 rte_eth_dev_rss_hash_update(uint16_t port_id,
3497                             struct rte_eth_rss_conf *rss_conf)
3498 {
3499         struct rte_eth_dev *dev;
3500         struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
3501         int ret;
3502
3503         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3504
3505         ret = rte_eth_dev_info_get(port_id, &dev_info);
3506         if (ret != 0)
3507                 return ret;
3508
3509         rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf);
3510
3511         dev = &rte_eth_devices[port_id];
3512         if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
3513             dev_info.flow_type_rss_offloads) {
3514                 RTE_ETHDEV_LOG(ERR,
3515                         "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
3516                         port_id, rss_conf->rss_hf,
3517                         dev_info.flow_type_rss_offloads);
3518                 return -EINVAL;
3519         }
3520         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
3521         return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
3522                                                                  rss_conf));
3523 }
3524
3525 int
3526 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
3527                               struct rte_eth_rss_conf *rss_conf)
3528 {
3529         struct rte_eth_dev *dev;
3530
3531         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3532         dev = &rte_eth_devices[port_id];
3533         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
3534         return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
3535                                                                    rss_conf));
3536 }
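
/*
 * Illustrative usage sketch (not part of the library; RTE_ETHDEV_DOC_EXAMPLES
 * is a hypothetical guard): passing a NULL rss_key asks the driver to
 * report only the hash functions, which can then be extended and written
 * back through rte_eth_dev_rss_hash_update().
 */
#ifdef RTE_ETHDEV_DOC_EXAMPLES
static int
example_hash_on_tcp(uint16_t port_id)
{
        struct rte_eth_rss_conf rss_conf;
        int ret;

        memset(&rss_conf, 0, sizeof(rss_conf));
        rss_conf.rss_key = NULL;        /* keep the current key */

        ret = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
        if (ret != 0)
                return ret;

        rss_conf.rss_hf |= ETH_RSS_TCP;

        return rte_eth_dev_rss_hash_update(port_id, &rss_conf);
}
#endif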
3537
3538 int
3539 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
3540                                 struct rte_eth_udp_tunnel *udp_tunnel)
3541 {
3542         struct rte_eth_dev *dev;
3543
3544         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3545         if (udp_tunnel == NULL) {
3546                 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
3547                 return -EINVAL;
3548         }
3549
3550         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
3551                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
3552                 return -EINVAL;
3553         }
3554
3555         dev = &rte_eth_devices[port_id];
3556         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
3557         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
3558                                                                 udp_tunnel));
3559 }
3560
3561 int
3562 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
3563                                    struct rte_eth_udp_tunnel *udp_tunnel)
3564 {
3565         struct rte_eth_dev *dev;
3566
3567         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3568         dev = &rte_eth_devices[port_id];
3569
3570         if (udp_tunnel == NULL) {
3571                 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
3572                 return -EINVAL;
3573         }
3574
3575         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
3576                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
3577                 return -EINVAL;
3578         }
3579
3580         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
3581         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
3582                                                                 udp_tunnel));
3583 }
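
/*
 * Illustrative usage sketch (not part of the library; RTE_ETHDEV_DOC_EXAMPLES
 * is a hypothetical guard): register the IANA-assigned VXLAN port so the
 * hardware parser recognizes the encapsulation.
 */
#ifdef RTE_ETHDEV_DOC_EXAMPLES
static int
example_add_vxlan_port(uint16_t port_id)
{
        struct rte_eth_udp_tunnel tunnel = {
                .udp_port = 4789,       /* IANA-assigned VXLAN UDP port */
                .prot_type = RTE_TUNNEL_TYPE_VXLAN,
        };

        return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
}
#endif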
3584
3585 int
3586 rte_eth_led_on(uint16_t port_id)
3587 {
3588         struct rte_eth_dev *dev;
3589
3590         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3591         dev = &rte_eth_devices[port_id];
3592         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
3593         return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
3594 }
3595
3596 int
3597 rte_eth_led_off(uint16_t port_id)
3598 {
3599         struct rte_eth_dev *dev;
3600
3601         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3602         dev = &rte_eth_devices[port_id];
3603         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
3604         return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
3605 }
3606
3607 /*
3608  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
3609  * an empty spot.
3610  */
3611 static int
3612 get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
3613 {
3614         struct rte_eth_dev_info dev_info;
3615         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3616         unsigned i;
3617         int ret;
3618
3619         ret = rte_eth_dev_info_get(port_id, &dev_info);
3620         if (ret != 0)
3621                 return -1;
3622
3623         for (i = 0; i < dev_info.max_mac_addrs; i++)
3624                 if (memcmp(addr, &dev->data->mac_addrs[i],
3625                                 RTE_ETHER_ADDR_LEN) == 0)
3626                         return i;
3627
3628         return -1;
3629 }
3630
3631 static const struct rte_ether_addr null_mac_addr;
3632
3633 int
3634 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
3635                         uint32_t pool)
3636 {
3637         struct rte_eth_dev *dev;
3638         int index;
3639         uint64_t pool_mask;
3640         int ret;
3641
3642         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3643         dev = &rte_eth_devices[port_id];
3644         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
3645
3646         if (rte_is_zero_ether_addr(addr)) {
3647                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
3648                         port_id);
3649                 return -EINVAL;
3650         }
3651         if (pool >= ETH_64_POOLS) {
3652                 RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", ETH_64_POOLS - 1);
3653                 return -EINVAL;
3654         }
3655
3656         index = get_mac_addr_index(port_id, addr);
3657         if (index < 0) {
3658                 index = get_mac_addr_index(port_id, &null_mac_addr);
3659                 if (index < 0) {
3660                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
3661                                 port_id);
3662                         return -ENOSPC;
3663                 }
3664         } else {
3665                 pool_mask = dev->data->mac_pool_sel[index];
3666
3667                 /* If both the MAC address and the pool are already there, do nothing */
3668                 if (pool_mask & (1ULL << pool))
3669                         return 0;
3670         }
3671
3672         /* Update NIC */
3673         ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
3674
3675         if (ret == 0) {
3676                 /* Update address in NIC data structure */
3677                 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]);
3678
3679                 /* Update pool bitmap in NIC data structure */
3680                 dev->data->mac_pool_sel[index] |= (1ULL << pool);
3681         }
3682
3683         return eth_err(port_id, ret);
3684 }
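
/*
 * Illustrative usage sketch (not part of the library; RTE_ETHDEV_DOC_EXAMPLES
 * is a hypothetical guard): add a locally administered secondary MAC
 * address to pool 0.  The example address is arbitrary.
 */
#ifdef RTE_ETHDEV_DOC_EXAMPLES
static int
example_add_secondary_mac(uint16_t port_id)
{
        struct rte_ether_addr addr = {
                .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
        };

        return rte_eth_dev_mac_addr_add(port_id, &addr, 0);
}
#endif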
3685
3686 int
3687 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr)
3688 {
3689         struct rte_eth_dev *dev;
3690         int index;
3691
3692         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3693         dev = &rte_eth_devices[port_id];
3694         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
3695
3696         index = get_mac_addr_index(port_id, addr);
3697         if (index == 0) {
3698                 RTE_ETHDEV_LOG(ERR,
3699                         "Port %u: Cannot remove default MAC address\n",
3700                         port_id);
3701                 return -EADDRINUSE;
3702         } else if (index < 0)
3703                 return 0;  /* Do nothing if address wasn't found */
3704
3705         /* Update NIC */
3706         (*dev->dev_ops->mac_addr_remove)(dev, index);
3707
3708         /* Update address in NIC data structure */
3709         rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
3710
3711         /* reset pool bitmap */
3712         dev->data->mac_pool_sel[index] = 0;
3713
3714         return 0;
3715 }
3716
3717 int
3718 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr)
3719 {
3720         struct rte_eth_dev *dev;
3721         int ret;
3722
3723         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3724
3725         if (!rte_is_valid_assigned_ether_addr(addr))
3726                 return -EINVAL;
3727
3728         dev = &rte_eth_devices[port_id];
3729         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
3730
3731         ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
3732         if (ret < 0)
3733                 return ret;
3734
3735         /* Update default address in NIC data structure */
3736         rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
3737
3738         return 0;
3739 }
3740
3741
3742 /*
3743  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
3744  * an empty spot.
3745  */
3746 static int
3747 get_hash_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
3748 {
3749         struct rte_eth_dev_info dev_info;
3750         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3751         unsigned i;
3752         int ret;
3753
3754         ret = rte_eth_dev_info_get(port_id, &dev_info);
3755         if (ret != 0)
3756                 return -1;
3757
3758         if (!dev->data->hash_mac_addrs)
3759                 return -1;
3760
3761         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
3762                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
3763                         RTE_ETHER_ADDR_LEN) == 0)
3764                         return i;
3765
3766         return -1;
3767 }
3768
3769 int
3770 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
3771                                 uint8_t on)
3772 {
3773         int index;
3774         int ret;
3775         struct rte_eth_dev *dev;
3776
3777         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3778
3779         dev = &rte_eth_devices[port_id];
3780         if (rte_is_zero_ether_addr(addr)) {
3781                 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
3782                         port_id);
3783                 return -EINVAL;
3784         }
3785
3786         index = get_hash_mac_addr_index(port_id, addr);
3787         /* Check if it's already there, and do nothing */
3788         if ((index >= 0) && on)
3789                 return 0;
3790
3791         if (index < 0) {
3792                 if (!on) {
3793                         RTE_ETHDEV_LOG(ERR,
3794                                 "Port %u: the MAC address was not set in UTA\n",
3795                                 port_id);
3796                         return -EINVAL;
3797                 }
3798
3799                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
3800                 if (index < 0) {
3801                         RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
3802                                 port_id);
3803                         return -ENOSPC;
3804                 }
3805         }
3806
3807         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
3808         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
3809         if (ret == 0) {
3810                 /* Update address in NIC data structure */
3811                 if (on)
3812                         rte_ether_addr_copy(addr,
3813                                         &dev->data->hash_mac_addrs[index]);
3814                 else
3815                         rte_ether_addr_copy(&null_mac_addr,
3816                                         &dev->data->hash_mac_addrs[index]);
3817         }
3818
3819         return eth_err(port_id, ret);
3820 }
3821
3822 int
3823 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
3824 {
3825         struct rte_eth_dev *dev;
3826
3827         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3828
3829         dev = &rte_eth_devices[port_id];
3830
3831         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
3832         return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
3833                                                                        on));
3834 }
3835
3836 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
3837                                         uint16_t tx_rate)
3838 {
3839         struct rte_eth_dev *dev;
3840         struct rte_eth_dev_info dev_info;
3841         struct rte_eth_link link;
3842         int ret;
3843
3844         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3845
3846         ret = rte_eth_dev_info_get(port_id, &dev_info);
3847         if (ret != 0)
3848                 return ret;
3849
3850         dev = &rte_eth_devices[port_id];
3851         link = dev->data->dev_link;
3852
3853         if (queue_idx >= dev_info.max_tx_queues) {
3854                 RTE_ETHDEV_LOG(ERR,
3855                         "Set queue rate limit: port %u: invalid queue id=%u\n",
3856                         port_id, queue_idx);
3857                 return -EINVAL;
3858         }
3859
3860         if (tx_rate > link.link_speed) {
3861                 RTE_ETHDEV_LOG(ERR,
3862                         "Set queue rate limit: invalid tx_rate=%u, bigger than link speed %u\n",
3863                         tx_rate, link.link_speed);
3864                 return -EINVAL;
3865         }
3866
3867         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
3868         return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
3869                                                         queue_idx, tx_rate));
3870 }
3871
3872 int
3873 rte_eth_mirror_rule_set(uint16_t port_id,
3874                         struct rte_eth_mirror_conf *mirror_conf,
3875                         uint8_t rule_id, uint8_t on)
3876 {
3877         struct rte_eth_dev *dev;
3878
3879         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3880         if (mirror_conf->rule_type == 0) {
3881                 RTE_ETHDEV_LOG(ERR, "Mirror rule type cannot be 0\n");
3882                 return -EINVAL;
3883         }
3884
3885         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
3886                 RTE_ETHDEV_LOG(ERR, "Invalid dst pool, pool id must be 0-%d\n",
3887                         ETH_64_POOLS - 1);
3888                 return -EINVAL;
3889         }
3890
3891         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
3892              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
3893             (mirror_conf->pool_mask == 0)) {
3894                 RTE_ETHDEV_LOG(ERR,
3895                         "Invalid mirror pool, pool mask cannot be 0\n");
3896                 return -EINVAL;
3897         }
3898
3899         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
3900             mirror_conf->vlan.vlan_mask == 0) {
3901                 RTE_ETHDEV_LOG(ERR,
3902                         "Invalid vlan mask, vlan mask cannot be 0\n");
3903                 return -EINVAL;
3904         }
3905
3906         dev = &rte_eth_devices[port_id];
3907         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
3908
3909         return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
3910                                                 mirror_conf, rule_id, on));
3911 }
3912
3913 int
3914 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
3915 {
3916         struct rte_eth_dev *dev;
3917
3918         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3919
3920         dev = &rte_eth_devices[port_id];
3921         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
3922
3923         return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev,
3924                                                                    rule_id));
3925 }
3926
3927 RTE_INIT(eth_dev_init_cb_lists)
3928 {
3929         int i;
3930
3931         for (i = 0; i < RTE_MAX_ETHPORTS; i++)
3932                 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
3933 }
3934
3935 int
3936 rte_eth_dev_callback_register(uint16_t port_id,
3937                         enum rte_eth_event_type event,
3938                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3939 {
3940         struct rte_eth_dev *dev;
3941         struct rte_eth_dev_callback *user_cb;
3942         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3943         uint16_t last_port;
3944
3945         if (!cb_fn)
3946                 return -EINVAL;
3947
3948         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3949                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
3950                 return -EINVAL;
3951         }
3952
3953         if (port_id == RTE_ETH_ALL) {
3954                 next_port = 0;
3955                 last_port = RTE_MAX_ETHPORTS - 1;
3956         } else {
3957                 next_port = last_port = port_id;
3958         }
3959
3960         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3961
3962         do {
3963                 dev = &rte_eth_devices[next_port];
3964
3965                 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
3966                         if (user_cb->cb_fn == cb_fn &&
3967                                 user_cb->cb_arg == cb_arg &&
3968                                 user_cb->event == event) {
3969                                 break;
3970                         }
3971                 }
3972
3973                 /* create a new callback. */
3974                 if (user_cb == NULL) {
3975                         user_cb = rte_zmalloc("INTR_USER_CALLBACK",
3976                                 sizeof(struct rte_eth_dev_callback), 0);
3977                         if (user_cb != NULL) {
3978                                 user_cb->cb_fn = cb_fn;
3979                                 user_cb->cb_arg = cb_arg;
3980                                 user_cb->event = event;
3981                                 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
3982                                                   user_cb, next);
3983                         } else {
3984                                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3985                                 rte_eth_dev_callback_unregister(port_id, event,
3986                                                                 cb_fn, cb_arg);
3987                                 return -ENOMEM;
3988                         }
3989
3990                 }
3991         } while (++next_port <= last_port);
3992
3993         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3994         return 0;
3995 }
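
/*
 * Illustrative usage sketch (not part of the library; RTE_ETHDEV_DOC_EXAMPLES
 * is a hypothetical guard): a minimal link status change handler registered
 * for every present and future port via RTE_ETH_ALL.
 */
#ifdef RTE_ETHDEV_DOC_EXAMPLES
static int
example_lsc_handler(uint16_t port_id, enum rte_eth_event_type event,
                    void *cb_arg __rte_unused, void *ret_param __rte_unused)
{
        printf("port %u: event %d (link status change)\n", port_id, event);
        return 0;
}

static int
example_register_lsc(void)
{
        return rte_eth_dev_callback_register(RTE_ETH_ALL,
                                             RTE_ETH_EVENT_INTR_LSC,
                                             example_lsc_handler, NULL);
}
#endif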
3996
3997 int
3998 rte_eth_dev_callback_unregister(uint16_t port_id,
3999                         enum rte_eth_event_type event,
4000                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4001 {
4002         int ret;
4003         struct rte_eth_dev *dev;
4004         struct rte_eth_dev_callback *cb, *next;
4005         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
4006         uint16_t last_port;
4007
4008         if (!cb_fn)
4009                 return -EINVAL;
4010
4011         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4012                 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
4013                 return -EINVAL;
4014         }
4015
4016         if (port_id == RTE_ETH_ALL) {
4017                 next_port = 0;
4018                 last_port = RTE_MAX_ETHPORTS - 1;
4019         } else {
4020                 next_port = last_port = port_id;
4021         }
4022
4023         rte_spinlock_lock(&rte_eth_dev_cb_lock);
4024
4025         do {
4026                 dev = &rte_eth_devices[next_port];
4027                 ret = 0;
4028                 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
4029                      cb = next) {
4030
4031                         next = TAILQ_NEXT(cb, next);
4032
4033                         if (cb->cb_fn != cb_fn || cb->event != event ||
4034                             (cb->cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
4035                                 continue;
4036
4037                         /*
4038                          * if this callback is not executing right now,
4039                          * then remove it.
4040                          */
4041                         if (cb->active == 0) {
4042                                 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
4043                                 rte_free(cb);
4044                         } else {
4045                                 ret = -EAGAIN;
4046                         }
4047                 }
4048         } while (++next_port <= last_port);
4049
4050         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
4051         return ret;
4052 }
4053
4054 int
4055 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
4056         enum rte_eth_event_type event, void *ret_param)
4057 {
4058         struct rte_eth_dev_callback *cb_lst;
4059         struct rte_eth_dev_callback dev_cb;
4060         int rc = 0;
4061
4062         rte_spinlock_lock(&rte_eth_dev_cb_lock);
4063         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
4064                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
4065                         continue;
4066                 dev_cb = *cb_lst;
4067                 cb_lst->active = 1;
4068                 if (ret_param != NULL)
4069                         dev_cb.ret_param = ret_param;
4070
4071                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
4072                 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
4073                                 dev_cb.cb_arg, dev_cb.ret_param);
4074                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
4075                 cb_lst->active = 0;
4076         }
4077         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
4078         return rc;
4079 }
4080
4081 void
4082 rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
4083 {
4084         if (dev == NULL)
4085                 return;
4086
4087         _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);
4088
4089         dev->state = RTE_ETH_DEV_ATTACHED;
4090 }
4091
4092 int
4093 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
4094 {
4095         uint32_t vec;
4096         struct rte_eth_dev *dev;
4097         struct rte_intr_handle *intr_handle;
4098         uint16_t qid;
4099         int rc;
4100
4101         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4102
4103         dev = &rte_eth_devices[port_id];
4104
4105         if (!dev->intr_handle) {
4106                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4107                 return -ENOTSUP;
4108         }
4109
4110         intr_handle = dev->intr_handle;
4111         if (!intr_handle->intr_vec) {
4112                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4113                 return -EPERM;
4114         }
4115
4116         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
4117                 vec = intr_handle->intr_vec[qid];
4118                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4119                 if (rc && rc != -EEXIST) {
4120                         RTE_ETHDEV_LOG(ERR,
4121                                 "p %u q %u rx ctl error op %d epfd %d vec %u\n",
4122                                 port_id, qid, op, epfd, vec);
4123                 }
4124         }
4125
4126         return 0;
4127 }
4128
4129 int
4130 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
4131 {
4132         struct rte_intr_handle *intr_handle;
4133         struct rte_eth_dev *dev;
4134         unsigned int efd_idx;
4135         uint32_t vec;
4136         int fd;
4137
4138         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
4139
4140         dev = &rte_eth_devices[port_id];
4141
4142         if (queue_id >= dev->data->nb_rx_queues) {
4143                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4144                 return -1;
4145         }
4146
4147         if (!dev->intr_handle) {
4148                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4149                 return -1;
4150         }
4151
4152         intr_handle = dev->intr_handle;
4153         if (!intr_handle->intr_vec) {
4154                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4155                 return -1;
4156         }
4157
4158         vec = intr_handle->intr_vec[queue_id];
4159         efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
4160                 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
4161         fd = intr_handle->efds[efd_idx];
4162
4163         return fd;
4164 }
4165
4166 const struct rte_memzone *
4167 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
4168                          uint16_t queue_id, size_t size, unsigned align,
4169                          int socket_id)
4170 {
4171         char z_name[RTE_MEMZONE_NAMESIZE];
4172         const struct rte_memzone *mz;
4173         int rc;
4174
4175         rc = snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s",
4176                       dev->data->port_id, queue_id, ring_name);
4177         if (rc >= RTE_MEMZONE_NAMESIZE) {
4178                 RTE_ETHDEV_LOG(ERR, "ring name too long\n");
4179                 rte_errno = ENAMETOOLONG;
4180                 return NULL;
4181         }
4182
4183         mz = rte_memzone_lookup(z_name);
4184         if (mz)
4185                 return mz;
4186
4187         return rte_memzone_reserve_aligned(z_name, size, socket_id,
4188                         RTE_MEMZONE_IOVA_CONTIG, align);
4189 }
4190
4191 int
4192 rte_eth_dev_create(struct rte_device *device, const char *name,
4193         size_t priv_data_size,
4194         ethdev_bus_specific_init ethdev_bus_specific_init,
4195         void *bus_init_params,
4196         ethdev_init_t ethdev_init, void *init_params)
4197 {
4198         struct rte_eth_dev *ethdev;
4199         int retval;
4200
4201         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL);
4202
4203         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
4204                 ethdev = rte_eth_dev_allocate(name);
4205                 if (!ethdev)
4206                         return -ENODEV;
4207
4208                 if (priv_data_size) {
4209                         ethdev->data->dev_private = rte_zmalloc_socket(
4210                                 name, priv_data_size, RTE_CACHE_LINE_SIZE,
4211                                 device->numa_node);
4212
4213                         if (!ethdev->data->dev_private) {
4214                                 RTE_LOG(ERR, EAL, "failed to allocate private data\n");
4215                                 retval = -ENOMEM;
4216                                 goto probe_failed;
4217                         }
4218                 }
4219         } else {
4220                 ethdev = rte_eth_dev_attach_secondary(name);
4221                 if (!ethdev) {
4222                         RTE_LOG(ERR, EAL, "secondary process attach failed, "
4223                                 "ethdev doesn't exist\n");
4224                         return -ENODEV;
4225                 }
4226         }
4227
4228         ethdev->device = device;
4229
4230         if (ethdev_bus_specific_init) {
4231                 retval = ethdev_bus_specific_init(ethdev, bus_init_params);
4232                 if (retval) {
4233                         RTE_LOG(ERR, EAL,
4234                                 "ethdev bus specific initialisation failed\n");
4235                         goto probe_failed;
4236                 }
4237         }
4238
4239         retval = ethdev_init(ethdev, init_params);
4240         if (retval) {
4241                 RTE_LOG(ERR, EAL, "ethdev initialisation failed\n");
4242                 goto probe_failed;
4243         }
4244
4245         rte_eth_dev_probing_finish(ethdev);
4246
4247         return retval;
4248
4249 probe_failed:
4250         rte_eth_dev_release_port(ethdev);
4251         return retval;
4252 }
4253
4254 int
4255 rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
4256         ethdev_uninit_t ethdev_uninit)
4257 {
4258         int ret;
4259
4260         ethdev = rte_eth_dev_allocated(ethdev->data->name);
4261         if (!ethdev)
4262                 return -ENODEV;
4263
4264         RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL);
4265
4266         ret = ethdev_uninit(ethdev);
4267         if (ret)
4268                 return ret;
4269
4270         return rte_eth_dev_release_port(ethdev);
4271 }
4272
4273 int
4274 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
4275                           int epfd, int op, void *data)
4276 {
4277         uint32_t vec;
4278         struct rte_eth_dev *dev;
4279         struct rte_intr_handle *intr_handle;
4280         int rc;
4281
4282         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4283
4284         dev = &rte_eth_devices[port_id];
4285         if (queue_id >= dev->data->nb_rx_queues) {
4286                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4287                 return -EINVAL;
4288         }
4289
4290         if (!dev->intr_handle) {
4291                 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4292                 return -ENOTSUP;
4293         }
4294
4295         intr_handle = dev->intr_handle;
4296         if (!intr_handle->intr_vec) {
4297                 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4298                 return -EPERM;
4299         }
4300
4301         vec = intr_handle->intr_vec[queue_id];
4302         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4303         if (rc && rc != -EEXIST) {
4304                 RTE_ETHDEV_LOG(ERR,
4305                         "p %u q %u rx ctl error op %d epfd %d vec %u\n",
4306                         port_id, queue_id, op, epfd, vec);
4307                 return rc;
4308         }
4309
4310         return 0;
4311 }
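
/*
 * Illustrative usage sketch (not part of the library; RTE_ETHDEV_DOC_EXAMPLES
 * is a hypothetical guard): bind one queue's interrupt to the calling
 * thread's default epoll instance and unmask it; the thread can then sleep
 * in rte_epoll_wait() until traffic arrives.  RTE_EPOLL_PER_THREAD and
 * RTE_INTR_EVENT_ADD come from the EAL interrupt API.
 */
#ifdef RTE_ETHDEV_DOC_EXAMPLES
static int
example_arm_rx_irq(uint16_t port_id, uint16_t queue_id)
{
        int ret;

        ret = rte_eth_dev_rx_intr_ctl_q(port_id, queue_id,
                                        RTE_EPOLL_PER_THREAD,
                                        RTE_INTR_EVENT_ADD, NULL);
        if (ret != 0)
                return ret;

        /* unmask the interrupt so the next received packet wakes us up */
        return rte_eth_dev_rx_intr_enable(port_id, queue_id);
}
#endif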
4312
4313 int
4314 rte_eth_dev_rx_intr_enable(uint16_t port_id,
4315                            uint16_t queue_id)
4316 {
4317         struct rte_eth_dev *dev;
4318
4319         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4320
4321         dev = &rte_eth_devices[port_id];
4322
4323         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
4324         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev,
4325                                                                 queue_id));
4326 }
4327
4328 int
4329 rte_eth_dev_rx_intr_disable(uint16_t port_id,
4330                             uint16_t queue_id)
4331 {
4332         struct rte_eth_dev *dev;
4333
4334         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4335
4336         dev = &rte_eth_devices[port_id];
4337
4338         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
4339         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev,
4340                                                                 queue_id));
4341 }
4342
4343
4344 int
4345 rte_eth_dev_filter_supported(uint16_t port_id,
4346                              enum rte_filter_type filter_type)
4347 {
4348         struct rte_eth_dev *dev;
4349
4350         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4351
4352         dev = &rte_eth_devices[port_id];
4353         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
4354         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
4355                                 RTE_ETH_FILTER_NOP, NULL);
4356 }
4357
4358 int
4359 rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
4360                         enum rte_filter_op filter_op, void *arg)
4361 {
4362         struct rte_eth_dev *dev;
4363
4364         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4365
4366         dev = &rte_eth_devices[port_id];
4367         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
4368         return eth_err(port_id, (*dev->dev_ops->filter_ctrl)(dev, filter_type,
4369                                                              filter_op, arg));
4370 }
4371
4372 const struct rte_eth_rxtx_callback *
4373 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
4374                 rte_rx_callback_fn fn, void *user_param)
4375 {
4376 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4377         rte_errno = ENOTSUP;
4378         return NULL;
4379 #endif
4380         struct rte_eth_dev *dev;
4381
4382         /* check input parameters */
4383         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4384                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4385                 rte_errno = EINVAL;
4386                 return NULL;
4387         }
4388         dev = &rte_eth_devices[port_id];
4389         if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
4390                 rte_errno = EINVAL;
4391                 return NULL;
4392         }
4393         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4394
4395         if (cb == NULL) {
4396                 rte_errno = ENOMEM;
4397                 return NULL;
4398         }
4399
4400         cb->fn.rx = fn;
4401         cb->param = user_param;
4402
4403         rte_spinlock_lock(&rte_eth_rx_cb_lock);
4404         /* Add the callbacks in fifo order. */
4405         struct rte_eth_rxtx_callback *tail =
4406                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
4407
4408         if (!tail) {
4409                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
4410
4411         } else {
4412                 while (tail->next)
4413                         tail = tail->next;
4414                 tail->next = cb;
4415         }
4416         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
4417
4418         return cb;
4419 }
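
/*
 * Illustrative usage sketch (not part of the library; RTE_ETHDEV_DOC_EXAMPLES
 * is a hypothetical guard): a pass-through RX callback that counts received
 * packets into a user-supplied counter.
 */
#ifdef RTE_ETHDEV_DOC_EXAMPLES
static uint16_t
example_count_rx(uint16_t port_id __rte_unused, uint16_t queue __rte_unused,
                 struct rte_mbuf *pkts[] __rte_unused, uint16_t nb_pkts,
                 uint16_t max_pkts __rte_unused, void *user_param)
{
        uint64_t *counter = user_param;

        *counter += nb_pkts;
        return nb_pkts;         /* pass the burst through unchanged */
}

static const struct rte_eth_rxtx_callback *
example_install_rx_counter(uint16_t port_id, uint16_t queue_id,
                           uint64_t *counter)
{
        return rte_eth_add_rx_callback(port_id, queue_id,
                                       example_count_rx, counter);
}
#endif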
4420
4421 const struct rte_eth_rxtx_callback *
4422 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
4423                 rte_rx_callback_fn fn, void *user_param)
4424 {
4425 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4426         rte_errno = ENOTSUP;
4427         return NULL;
4428 #endif
4429         /* check input parameters */
4430         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4431                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4432                 rte_errno = EINVAL;
4433                 return NULL;
4434         }
4435
4436         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4437
4438         if (cb == NULL) {
4439                 rte_errno = ENOMEM;
4440                 return NULL;
4441         }
4442
4443         cb->fn.rx = fn;
4444         cb->param = user_param;
4445
4446         rte_spinlock_lock(&rte_eth_rx_cb_lock);
4447         /* Add the callback at the first position */
4448         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
4449         rte_smp_wmb();
4450         rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
4451         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
4452
4453         return cb;
4454 }
4455
4456 const struct rte_eth_rxtx_callback *
4457 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
4458                 rte_tx_callback_fn fn, void *user_param)
4459 {
4460 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4461         rte_errno = ENOTSUP;
4462         return NULL;
4463 #endif
4464         struct rte_eth_dev *dev;
4465
4466         /* check input parameters */
4467         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4468                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
4469                 rte_errno = EINVAL;
4470                 return NULL;
4471         }
4472
4473         dev = &rte_eth_devices[port_id];
4474         if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
4475                 rte_errno = EINVAL;
4476                 return NULL;
4477         }
4478
4479         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4480
4481         if (cb == NULL) {
4482                 rte_errno = ENOMEM;
4483                 return NULL;
4484         }
4485
4486         cb->fn.tx = fn;
4487         cb->param = user_param;
4488
4489         rte_spinlock_lock(&rte_eth_tx_cb_lock);
4490         /* Add the callback at the tail, in FIFO order. */
4491         struct rte_eth_rxtx_callback *tail =
4492                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
4493
4494         if (!tail) {
4495                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
4496
4497         } else {
4498                 while (tail->next)
4499                         tail = tail->next;
4500                 tail->next = cb;
4501         }
4502         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
4503
4504         return cb;
4505 }
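
/*
 * Editor's usage sketch (illustrative only): a pre-Tx-burst callback.
 * Returning fewer packets than nb_pkts tells the burst path to hand only
 * that many mbufs to the PMD (the callback owns any mbufs it drops); this
 * sketch passes the burst through unchanged. Registration mirrors the Rx
 * case, via rte_eth_add_tx_callback().
 */
static uint16_t __rte_unused
example_tx_passthrough_cb(uint16_t port_id __rte_unused,
                uint16_t queue __rte_unused,
                struct rte_mbuf *pkts[] __rte_unused, uint16_t nb_pkts,
                void *user_param __rte_unused)
{
        return nb_pkts;
}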
4506
4507 int
4508 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
4509                 const struct rte_eth_rxtx_callback *user_cb)
4510 {
4511 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4512         return -ENOTSUP;
4513 #endif
4514         /* Check input parameters. */
4515         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
4516         if (user_cb == NULL ||
4517                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
4518                 return -EINVAL;
4519
4520         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4521         struct rte_eth_rxtx_callback *cb;
4522         struct rte_eth_rxtx_callback **prev_cb;
4523         int ret = -EINVAL;
4524
4525         rte_spinlock_lock(&rte_eth_rx_cb_lock);
4526         prev_cb = &dev->post_rx_burst_cbs[queue_id];
4527         for (; *prev_cb != NULL; prev_cb = &cb->next) {
4528                 cb = *prev_cb;
4529                 if (cb == user_cb) {
4530                         /* Remove the user cb from the callback list. */
4531                         *prev_cb = cb->next;
4532                         ret = 0;
4533                         break;
4534                 }
4535         }
4536         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
4537
4538         return ret;
4539 }
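
/*
 * Editor's usage sketch (illustrative only): removal only unlinks the
 * callback from the per-queue list. Per the API contract the caller still
 * owns the callback memory and must not release it until no data-plane
 * thread can still be executing it; the quiescence step below is an
 * application-level assumption, not something this library provides.
 */
static void __rte_unused
example_unregister_rx_cb(uint16_t port_id, uint16_t queue_id,
                const struct rte_eth_rxtx_callback *cb)
{
        if (rte_eth_remove_rx_callback(port_id, queue_id, cb) != 0)
                return;
        /* ... wait here until all lcores have left the Rx burst path ... */
        rte_free((void *)(uintptr_t)cb);
}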
4540
4541 int
4542 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
4543                 const struct rte_eth_rxtx_callback *user_cb)
4544 {
4545 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4546         return -ENOTSUP;
4547 #endif
4548         /* Check input parameters. */
4549         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
4550         if (user_cb == NULL ||
4551                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
4552                 return -EINVAL;
4553
4554         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4555         int ret = -EINVAL;
4556         struct rte_eth_rxtx_callback *cb;
4557         struct rte_eth_rxtx_callback **prev_cb;
4558
4559         rte_spinlock_lock(&rte_eth_tx_cb_lock);
4560         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
4561         for (; *prev_cb != NULL; prev_cb = &cb->next) {
4562                 cb = *prev_cb;
4563                 if (cb == user_cb) {
4564                         /* Remove the user cb from the callback list. */
4565                         *prev_cb = cb->next;
4566                         ret = 0;
4567                         break;
4568                 }
4569         }
4570         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
4571
4572         return ret;
4573 }
4574
4575 int
4576 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4577         struct rte_eth_rxq_info *qinfo)
4578 {
4579         struct rte_eth_dev *dev;
4580
4581         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4582
4583         if (qinfo == NULL)
4584                 return -EINVAL;
4585
4586         dev = &rte_eth_devices[port_id];
4587         if (queue_id >= dev->data->nb_rx_queues) {
4588                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4589                 return -EINVAL;
4590         }
4591
4592         if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
4593                 RTE_ETHDEV_LOG(INFO,
4594                         "Can't get info for hairpin Rx queue %"PRIu16" of device with port_id=%"PRIu16"\n",
4595                         queue_id, port_id);
4596                 return -EINVAL;
4597         }
4598
4599         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
4600
4601         memset(qinfo, 0, sizeof(*qinfo));
4602         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
4603         return 0;
4604 }
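
/*
 * Editor's usage sketch (illustrative only): querying Rx queue information.
 * nb_desc and scattered_rx are fields of struct rte_eth_rxq_info; the queue
 * number would come from the application's configuration.
 */
static void __rte_unused
example_dump_rxq(uint16_t port_id, uint16_t queue_id)
{
        struct rte_eth_rxq_info qinfo;

        if (rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo) != 0)
                return;
        RTE_ETHDEV_LOG(INFO, "rxq %u: nb_desc=%u scattered=%u\n",
                queue_id, qinfo.nb_desc, qinfo.scattered_rx);
}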
4605
4606 int
4607 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4608         struct rte_eth_txq_info *qinfo)
4609 {
4610         struct rte_eth_dev *dev;
4611
4612         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4613
4614         if (qinfo == NULL)
4615                 return -EINVAL;
4616
4617         dev = &rte_eth_devices[port_id];
4618         if (queue_id >= dev->data->nb_tx_queues) {
4619                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
4620                 return -EINVAL;
4621         }
4622
4623         if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
4624                 RTE_ETHDEV_LOG(INFO,
4625                         "Can't get info for hairpin Tx queue %"PRIu16" of device with port_id=%"PRIu16"\n",
4626                         queue_id, port_id);
4627                 return -EINVAL;
4628         }
4629
4630         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
4631
4632         memset(qinfo, 0, sizeof(*qinfo));
4633         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
4634
4635         return 0;
4636 }
4637
4638 int
4639 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
4640                           struct rte_eth_burst_mode *mode)
4641 {
4642         struct rte_eth_dev *dev;
4643
4644         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4645
4646         if (mode == NULL)
4647                 return -EINVAL;
4648
4649         dev = &rte_eth_devices[port_id];
4650
4651         if (queue_id >= dev->data->nb_rx_queues) {
4652                 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4653                 return -EINVAL;
4654         }
4655
4656         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP);
4657         memset(mode, 0, sizeof(*mode));
4658         return eth_err(port_id,
4659                        dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode));
4660 }
4661
4662 int
4663 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
4664                           struct rte_eth_burst_mode *mode)
4665 {
4666         struct rte_eth_dev *dev;
4667
4668         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4669
4670         if (mode == NULL)
4671                 return -EINVAL;
4672
4673         dev = &rte_eth_devices[port_id];
4674
4675         if (queue_id >= dev->data->nb_tx_queues) {
4676                 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
4677                 return -EINVAL;
4678         }
4679
4680         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP);
4681         memset(mode, 0, sizeof(*mode));
4682         return eth_err(port_id,
4683                        dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode));
4684 }
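
/*
 * Editor's usage sketch (illustrative only): probing whether a PMD reports
 * its burst mode. Only the return code is inspected here, since the layout
 * of struct rte_eth_burst_mode is driver- and version-specific.
 */
static void __rte_unused
example_probe_burst_mode(uint16_t port_id, uint16_t queue_id)
{
        struct rte_eth_burst_mode mode;

        if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == -ENOTSUP)
                RTE_ETHDEV_LOG(INFO, "port %u: Rx burst mode not reported\n",
                        port_id);
        if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == -ENOTSUP)
                RTE_ETHDEV_LOG(INFO, "port %u: Tx burst mode not reported\n",
                        port_id);
}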
4685
4686 int
4687 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
4688                              struct rte_ether_addr *mc_addr_set,
4689                              uint32_t nb_mc_addr)
4690 {
4691         struct rte_eth_dev *dev;
4692
4693         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4694
4695         dev = &rte_eth_devices[port_id];
4696         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
4697         return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
4698                                                 mc_addr_set, nb_mc_addr));
4699 }
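
/*
 * Editor's usage sketch (illustrative only): replacing the multicast filter
 * list. The two addresses are arbitrary examples; passing nb_mc_addr == 0
 * clears the list again.
 */
static int __rte_unused
example_set_mc_list(uint16_t port_id)
{
        struct rte_ether_addr mc[2] = {
                { .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } },
                { .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb } },
        };

        return rte_eth_dev_set_mc_addr_list(port_id, mc, RTE_DIM(mc));
}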
4700
4701 int
4702 rte_eth_timesync_enable(uint16_t port_id)
4703 {
4704         struct rte_eth_dev *dev;
4705
4706         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4707         dev = &rte_eth_devices[port_id];
4708
4709         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
4710         return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
4711 }
4712
4713 int
4714 rte_eth_timesync_disable(uint16_t port_id)
4715 {
4716         struct rte_eth_dev *dev;
4717
4718         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4719         dev = &rte_eth_devices[port_id];
4720
4721         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
4722         return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
4723 }
4724
4725 int
4726 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
4727                                    uint32_t flags)
4728 {
4729         struct rte_eth_dev *dev;
4730
4731         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4732         dev = &rte_eth_devices[port_id];
4733
4734         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
4735         return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
4736                                 (dev, timestamp, flags));
4737 }
4738
4739 int
4740 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
4741                                    struct timespec *timestamp)
4742 {
4743         struct rte_eth_dev *dev;
4744
4745         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4746         dev = &rte_eth_devices[port_id];
4747
4748         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
4749         return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
4750                                 (dev, timestamp));
4751 }
4752
4753 int
4754 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
4755 {
4756         struct rte_eth_dev *dev;
4757
4758         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4759         dev = &rte_eth_devices[port_id];
4760
4761         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
4762         return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev,
4763                                                                       delta));
4764 }
4765
4766 int
4767 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
4768 {
4769         struct rte_eth_dev *dev;
4770
4771         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4772         dev = &rte_eth_devices[port_id];
4773
4774         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
4775         return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
4776                                                                 timestamp));
4777 }
4778
4779 int
4780 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
4781 {
4782         struct rte_eth_dev *dev;
4783
4784         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4785         dev = &rte_eth_devices[port_id];
4786
4787         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
4788         return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
4789                                                                 timestamp));
4790 }
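
/*
 * Editor's usage sketch (illustrative only): the usual IEEE 1588 sequence
 * is enable, read/adjust while running, then disable. The one-microsecond
 * adjustment is a placeholder value.
 */
static void __rte_unused
example_timesync(uint16_t port_id)
{
        struct timespec ts;

        if (rte_eth_timesync_enable(port_id) != 0)
                return;
        if (rte_eth_timesync_read_time(port_id, &ts) == 0)
                RTE_ETHDEV_LOG(INFO, "device time: %ld.%09ld\n",
                        (long)ts.tv_sec, (long)ts.tv_nsec);
        rte_eth_timesync_adjust_time(port_id, 1000); /* +1 us */
        rte_eth_timesync_disable(port_id);
}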
4791
4792 int
4793 rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
4794 {
4795         struct rte_eth_dev *dev;
4796
4797         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4798         dev = &rte_eth_devices[port_id];
4799
4800         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP);
4801         return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock));
4802 }
4803
4804 int
4805 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
4806 {
4807         struct rte_eth_dev *dev;
4808
4809         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4810
4811         dev = &rte_eth_devices[port_id];
4812         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
4813         return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
4814 }
4815
4816 int
4817 rte_eth_dev_get_eeprom_length(uint16_t port_id)
4818 {
4819         struct rte_eth_dev *dev;
4820
4821         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4822
4823         dev = &rte_eth_devices[port_id];
4824         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
4825         return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
4826 }
4827
4828 int
4829 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
4830 {
4831         struct rte_eth_dev *dev;
4832
4833         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4834
4835         dev = &rte_eth_devices[port_id];
4836         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
4837         return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
4838 }
4839
4840 int
4841 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
4842 {
4843         struct rte_eth_dev *dev;
4844
4845         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4846
4847         dev = &rte_eth_devices[port_id];
4848         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
4849         return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
4850 }
4851
4852 int
4853 rte_eth_dev_get_module_info(uint16_t port_id,
4854                             struct rte_eth_dev_module_info *modinfo)
4855 {
4856         struct rte_eth_dev *dev;
4857
4858         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4859
4860         dev = &rte_eth_devices[port_id];
4861         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP);
4862         return eth_err(port_id, (*dev->dev_ops->get_module_info)(dev, modinfo));
4863 }
4864
4865 int
4866 rte_eth_dev_get_module_eeprom(uint16_t port_id,
4867                               struct rte_dev_eeprom_info *info)
4868 {
4869         struct rte_eth_dev *dev;
4870
4871         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4872
4873         dev = &rte_eth_devices[port_id];
4874         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP);
4875         return eth_err(port_id, (*dev->dev_ops->get_module_eeprom)(dev, info));
4876 }
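
/*
 * Editor's usage sketch (illustrative only): rte_eth_dev_get_module_info()
 * reports the plugged module's EEPROM size, which then bounds the read; the
 * 256-byte buffer is an assumption of this sketch.
 */
static void __rte_unused
example_dump_module_eeprom(uint16_t port_id)
{
        struct rte_eth_dev_module_info modinfo;
        struct rte_dev_eeprom_info info;
        uint8_t buf[256];

        if (rte_eth_dev_get_module_info(port_id, &modinfo) != 0)
                return;
        memset(&info, 0, sizeof(info));
        info.data = buf;
        info.length = RTE_MIN(modinfo.eeprom_len, (uint32_t)sizeof(buf));
        (void)rte_eth_dev_get_module_eeprom(port_id, &info);
}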
4877
4878 int
4879 rte_eth_dev_get_dcb_info(uint16_t port_id,
4880                              struct rte_eth_dcb_info *dcb_info)
4881 {
4882         struct rte_eth_dev *dev;
4883
4884         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4885
4886         dev = &rte_eth_devices[port_id];
4887         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
4888
4889         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
4890         return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
4891 }
4892
4893 int
4894 rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
4895                                     struct rte_eth_l2_tunnel_conf *l2_tunnel)
4896 {
4897         struct rte_eth_dev *dev;
4898
4899         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4900         if (l2_tunnel == NULL) {
4901                 RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n");
4902                 return -EINVAL;
4903         }
4904
4905         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
4906                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4907                 return -EINVAL;
4908         }
4909
4910         dev = &rte_eth_devices[port_id];
4911         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
4912                                 -ENOTSUP);
4913         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev,
4914                                                                 l2_tunnel));
4915 }
4916
4917 int
4918 rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
4919                                   struct rte_eth_l2_tunnel_conf *l2_tunnel,
4920                                   uint32_t mask,
4921                                   uint8_t en)
4922 {
4923         struct rte_eth_dev *dev;
4924
4925         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4926
4927         if (l2_tunnel == NULL) {
4928                 RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n");
4929                 return -EINVAL;
4930         }
4931
4932         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
4933                 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4934                 return -EINVAL;
4935         }
4936
4937         if (mask == 0) {
4938                 RTE_ETHDEV_LOG(ERR, "Mask must be non-zero\n");
4939                 return -EINVAL;
4940         }
4941
4942         dev = &rte_eth_devices[port_id];
4943         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
4944                                 -ENOTSUP);
4945         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_offload_set)(dev,
4946                                                         l2_tunnel, mask, en));
4947 }
4948
4949 static void
4950 rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc,
4951                            const struct rte_eth_desc_lim *desc_lim)
4952 {
4953         if (desc_lim->nb_align != 0)
4954                 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
4955
4956         if (desc_lim->nb_max != 0)
4957                 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
4958
4959         *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
4960 }
4961
4962 int
4963 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
4964                                  uint16_t *nb_rx_desc,
4965                                  uint16_t *nb_tx_desc)
4966 {
4967         struct rte_eth_dev_info dev_info;
4968         int ret;
4969
4970         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4971
4972         ret = rte_eth_dev_info_get(port_id, &dev_info);
4973         if (ret != 0)
4974                 return ret;
4975
4976         if (nb_rx_desc != NULL)
4977                 rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
4978
4979         if (nb_tx_desc != NULL)
4980                 rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
4981
4982         return 0;
4983 }
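
/*
 * Editor's usage sketch (illustrative only): clamping requested ring sizes
 * to the device limits before queue setup; 1024 is a placeholder request.
 */
static int __rte_unused
example_adjust_desc(uint16_t port_id)
{
        uint16_t nb_rxd = 1024;
        uint16_t nb_txd = 1024;
        int ret;

        ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
        if (ret != 0)
                return ret;
        /* nb_rxd/nb_txd now respect nb_min/nb_max/nb_align and can be
         * passed to the Rx/Tx queue setup functions.
         */
        return 0;
}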
4984
4985 int
4986 rte_eth_dev_hairpin_capability_get(uint16_t port_id,
4987                                    struct rte_eth_hairpin_cap *cap)
4988 {
4989         struct rte_eth_dev *dev;
4990
4991         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
4992
4993         dev = &rte_eth_devices[port_id];
4994         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP);
4995         memset(cap, 0, sizeof(*cap));
4996         return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap));
4997 }
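
/*
 * Editor's usage sketch (illustrative only): checking for hairpin support
 * before attempting any hairpin queue setup.
 */
static int __rte_unused
example_has_hairpin(uint16_t port_id)
{
        struct rte_eth_hairpin_cap cap;

        if (rte_eth_dev_hairpin_capability_get(port_id, &cap) != 0)
                return 0;
        return cap.max_nb_queues > 0;
}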
4998
4999 int
5000 rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
5001 {
5002         if (dev->data->rx_queue_state[queue_id] ==
5003             RTE_ETH_QUEUE_STATE_HAIRPIN)
5004                 return 1;
5005         return 0;
5006 }
5007
5008 int
5009 rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
5010 {
5011         if (dev->data->tx_queue_state[queue_id] ==
5012             RTE_ETH_QUEUE_STATE_HAIRPIN)
5013                 return 1;
5014         return 0;
5015 }
5016
5017 int
5018 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
5019 {
5020         struct rte_eth_dev *dev;
5021
5022         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5023
5024         if (pool == NULL)
5025                 return -EINVAL;
5026
5027         dev = &rte_eth_devices[port_id];
5028
5029         if (*dev->dev_ops->pool_ops_supported == NULL)
5030                 return 1; /* all pools are supported */
5031
5032         return (*dev->dev_ops->pool_ops_supported)(dev, pool);
5033 }
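
/*
 * Editor's usage sketch (illustrative only): a positive result means the
 * PMD either supports the named mempool ops or does not care; "ring_mp_mc"
 * is used here only as an example ops name.
 */
static int __rte_unused
example_pool_ops_ok(uint16_t port_id)
{
        return rte_eth_dev_pool_ops_supported(port_id, "ring_mp_mc") > 0;
}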
5034
5035 /**
5036  * A set of values to describe the possible states of a switch domain.
5037  */
5038 enum rte_eth_switch_domain_state {
5039         RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
5040         RTE_ETH_SWITCH_DOMAIN_ALLOCATED
5041 };
5042
5043 /**
5044  * Array of switch domains available for allocation. It is sized to
5045  * RTE_MAX_ETHPORTS elements, as there cannot be more active switch domains
5046  * than ethdev ports in a single process.
5047  */
5048 static struct rte_eth_dev_switch {
5049         enum rte_eth_switch_domain_state state;
5050 } rte_eth_switch_domains[RTE_MAX_ETHPORTS];
5051
5052 int
5053 rte_eth_switch_domain_alloc(uint16_t *domain_id)
5054 {
5055         unsigned int i;
5056
5057         *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
5058
5059         for (i = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID + 1;
5060                 i < RTE_MAX_ETHPORTS; i++) {
5061                 if (rte_eth_switch_domains[i].state ==
5062                         RTE_ETH_SWITCH_DOMAIN_UNUSED) {
5063                         rte_eth_switch_domains[i].state =
5064                                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
5065                         *domain_id = i;
5066                         return 0;
5067                 }
5068         }
5069
5070         return -ENOSPC;
5071 }
5072
5073 int
5074 rte_eth_switch_domain_free(uint16_t domain_id)
5075 {
5076         if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
5077                 domain_id >= RTE_MAX_ETHPORTS)
5078                 return -EINVAL;
5079
5080         if (rte_eth_switch_domains[domain_id].state !=
5081                 RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
5082                 return -EINVAL;
5083
5084         rte_eth_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;
5085
5086         return 0;
5087 }
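
/*
 * Editor's usage sketch (illustrative only): a PF driver typically
 * allocates one switch domain, shares its ID with the port representors it
 * creates, and frees the domain on close.
 */
static void __rte_unused
example_switch_domain(void)
{
        uint16_t domain_id;

        if (rte_eth_switch_domain_alloc(&domain_id) != 0)
                return;
        /* ... record domain_id in the switch_info of related ports ... */
        rte_eth_switch_domain_free(domain_id);
}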
5088
5089 static int
5090 rte_eth_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
5091 {
5092         int state;
5093         struct rte_kvargs_pair *pair;
5094         char *letter;
5095
5096         arglist->str = strdup(str_in);
5097         if (arglist->str == NULL)
5098                 return -ENOMEM;
5099
5100         letter = arglist->str;
5101         state = 0;
5102         arglist->count = 0;
5103         pair = &arglist->pairs[0];
5104         while (1) {
5105                 switch (state) {
5106                 case 0: /* Initial */
5107                         if (*letter == '=')
5108                                 return -EINVAL;
5109                         else if (*letter == '\0')
5110                                 return 0;
5111
5112                         state = 1;
5113                         pair->key = letter;
5114                         /* fall-thru */
5115
5116                 case 1: /* Parsing key */
5117                         if (*letter == '=') {
5118                                 *letter = '\0';
5119                                 pair->value = letter + 1;
5120                                 state = 2;
5121                         } else if (*letter == ',' || *letter == '\0')
5122                                 return -EINVAL;
5123                         break;
5124
5125
5126                 case 2: /* Parsing value */
5127                         if (*letter == '[')
5128                                 state = 3;
5129                         else if (*letter == ',') {
5130                                 *letter = '\0';
5131                                 arglist->count++;
5132                                 pair = &arglist->pairs[arglist->count];
5133                                 state = 0;
5134                         } else if (*letter == '\0') {
5135                                 letter--;
5136                                 arglist->count++;
5137                                 pair = &arglist->pairs[arglist->count];
5138                                 state = 0;
5139                         }
5140                         break;
5141
5142                 case 3: /* Parsing list */
5143                         if (*letter == ']')
5144                                 state = 2;
5145                         else if (*letter == '\0')
5146                                 return -EINVAL;
5147                         break;
5148                 }
5149                 letter++;
5150         }
5151 }
5152
5153 int
5154 rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
5155 {
5156         struct rte_kvargs args;
5157         struct rte_kvargs_pair *pair;
5158         unsigned int i;
5159         int result = 0;
5160
5161         memset(eth_da, 0, sizeof(*eth_da));
5162
5163         result = rte_eth_devargs_tokenise(&args, dargs);
5164         if (result < 0)
5165                 goto parse_cleanup;
5166
5167         for (i = 0; i < args.count; i++) {
5168                 pair = &args.pairs[i];
5169                 if (strcmp("representor", pair->key) == 0) {
5170                         result = rte_eth_devargs_parse_list(pair->value,
5171                                 rte_eth_devargs_parse_representor_ports,
5172                                 eth_da);
5173                         if (result < 0)
5174                                 goto parse_cleanup;
5175                 }
5176         }
5177
5178 parse_cleanup:
5179         if (args.str)
5180                 free(args.str);
5181
5182         return result;
5183 }
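
/*
 * Editor's usage sketch (illustrative only): the tokeniser above keeps
 * bracketed lists intact, so "representor=[0-3]" parses as a single
 * key/value pair whose value is the port list.
 */
static void __rte_unused
example_parse_representors(void)
{
        struct rte_eth_devargs eth_da;

        if (rte_eth_devargs_parse("representor=[0-3]", &eth_da) == 0)
                RTE_ETHDEV_LOG(INFO, "parsed %u representor ports\n",
                        eth_da.nb_representor_ports);
}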
5184
5185 RTE_INIT(ethdev_init_log)
5186 {
5187         rte_eth_dev_logtype = rte_log_register("lib.ethdev");
5188         if (rte_eth_dev_logtype >= 0)
5189                 rte_log_set_level(rte_eth_dev_logtype, RTE_LOG_INFO);
5190 }